library (1 value) | test_file (785 values) | test_function (1–295 chars) | before (0–448k chars) | after (0–487k chars) | context_before (947 values) | context_after (0–16.3k chars) | commit_before (1 value) | commit_after (1 value) | change_type (3 values) |
---|---|---|---|---|---|---|---|---|---|
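A minimal sketch of how rows with this schema could be loaded and filtered, assuming the table above is published as a Hugging Face dataset; the path "org/pytorch-test-functions" is a hypothetical placeholder, only the column names come from the table:

```python
# Minimal sketch (assumption: the table above is available as a Hugging Face
# dataset; "org/pytorch-test-functions" is a hypothetical placeholder path).
from datasets import load_dataset

ds = load_dataset("org/pytorch-test-functions", split="train")

# Keep only rows whose test function was deleted between the two commits.
deleted = ds.filter(lambda row: row["change_type"] == "deleted")

# Print a few (test_file, test_function) pairs from the filtered rows.
for row in deleted.select(range(min(3, len(deleted)))):
    print(row["test_file"], row["test_function"])
```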
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_batch_size_small
|
def test_batch_size_small(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    model = Pipe(model, chunks=4)
    with pytest.warns(None) as record:
        model(torch.rand(2, 1))
    # Batch size smaller than chunks is legal.
    assert not record
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
count_grad_fn
|
def count_grad_fn(grad_fn, name, visited=None):
    if visited is None:
        visited = set()
    if grad_fn in visited:
        return 0
    visited.add(grad_fn)
    if grad_fn is None:
        return 0
    if grad_fn.__class__.__name__ == name:
        return 1
    counter = 0
    for next_grad_fn, _ in grad_fn.next_functions:
        counter += count_grad_fn(next_grad_fn, name, visited=visited)
    return counter

model = nn.Sequential(nn.Linear(1, 1))
input = torch.rand(2, 1)
always = Pipe(model, chunks=2, checkpoint="always")
except_last = Pipe(model, chunks=2, checkpoint="except_last")
never = Pipe(model, chunks=2, checkpoint="never")
always_output = always(input)
except_last_output = except_last(input)
never_output = never(input)
assert count_grad_fn(always_output.local_value().grad_fn, "CheckpointBackward") == 2
assert count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.local_value().grad_fn, "CheckpointBackward") == 0
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_checkpoint_mode_invalid
|
def test_checkpoint_mode_invalid(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
        Pipe(model, chunks=2, checkpoint="INVALID_CHECKPOINT")
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_checkpoint_mode_when_chunks_1
|
def test_checkpoint_mode_when_chunks_1(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    # All checkpoint modes are fine.
    Pipe(model, chunks=1, checkpoint="except_last")
    Pipe(model, chunks=1, checkpoint="always")
    Pipe(model, chunks=1, checkpoint="never")
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
test_get_schedule_class
|
def test_get_schedule_class(self):
    # List of all expected schedule names
    schedule_names = [
        "1F1B",
        "Interleaved1F1B",
        "GPipe",
        "FlexibleInterleaved1F1B",
        "LoopedBFS",
        "PipelineScheduleSingle",
        "PipelineScheduleMulti",
    ]
    # Test each schedule name
    for name in schedule_names:
        with self.subTest(name=name):
            schedule_class = get_schedule_class(name)
            self.assertIsNotNone(
                schedule_class, f"Class for {name} should not be None"
            )
            self.assertTrue(
                issubclass(schedule_class, _PipelineSchedule),
                f"{name} should be a subclass of _PipelineSchedule",
            )
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class ScheduleTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
test_pipeline_order_flex_and_zero_bubble
|
def test_pipeline_order_flex_and_zero_bubble(self, ScheduleClass):
    for num_local_stages, num_microbatches, group_size in self.test_cases:
        with self.subTest(
            num_local_stages=num_local_stages,
            num_microbatches=num_microbatches,
            group_size=group_size,
        ):
            warmups_ops_last_stage = (num_local_stages - 1) * (
                num_microbatches // max(1, num_microbatches // group_size)
            )
            warmup_ops = warmups_ops_last_stage + 2 * (group_size - 1)
            warmup_ops = min(warmup_ops, num_microbatches * num_local_stages)
            for i in range(2):
                num_stages = num_local_stages * group_size
                stages = [
                    MockPipelineStage(group_size=group_size, num_stages=num_stages)
                    for i in range(num_local_stages)
                ]
                schedule = ScheduleClass(
                    stages, num_microbatches, enable_zero_bubble=(i == 0)
                )
                formatted_pipeline_order = _format_pipeline_order(
                    schedule.pipeline_order
                )
                # print(formatted_pipeline_order)
                _validate_pipeline_order(
                    schedule.pipeline_order,
                    num_microbatches,
                    num_stages,
                    enable_zero_bubble=(i == 0),
                )
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class TestSchedulePlan(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
test_send_recv
|
def test_send_recv(self, test_info):
    """Tests the lowering pass that adds send/recv ops to a compute-only schedule."""
    compute_sch = {
        rank: self._parse_actions(test_info["compute"][rank])
        for rank in test_info["compute"]
    }
    expected_comms_sch = {
        rank: self._parse_actions(test_info["comms"][rank])
        for rank in test_info["comms"]
    }
    comms_sch = _add_send_recv(
        compute_sch, test_info["stage_to_rank"], test_info["num_stages"]
    )
    for rank in expected_comms_sch:
        for i, (expected, actual) in enumerate(
            zip(expected_comms_sch[rank], comms_sch[rank])
        ):
            self.assertEqual(
                expected,
                actual,
                (
                    f"Mismatch on rank {rank} at position {i}."
                    f"\nExpected: {expected_comms_sch[rank]}"
                    f"\nActual: {comms_sch[rank]}"
                ),
            )
        self.assertEqual(len(comms_sch[rank]), len(expected_comms_sch[rank]))
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class TestScheduleLowering(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_backward.py
|
test_stage_backward_weight_multiple_iters
|
def test_stage_backward_weight_multiple_iters(self):
    # MLP as a stage module
    mod = MLPModule(d_hid)
    inputs = []
    for _ in range(10):
        x = torch.randn(batch_size, d_hid)
        inputs.append(x)
        # As in a pipeline stage, the inputs to this stage require gradients
        x.requires_grad_(True)
    target = torch.randn(batch_size, d_hid)
    loss_fn = torch.nn.MSELoss(reduction="sum")
    # Make a copy
    ref_mod = copy.deepcopy(mod)
    ref_inputs = []
    for x in inputs:
        ref_inputs.append(x.detach().requires_grad_(x.requires_grad))
    ref_target = target.detach()
    # Forward, then backward of loss with respect to inputs
    for x in inputs:
        out = mod(x)
        loss = loss_fn(out, target)
        dinputs, param_groups = stage_backward_input(
            stage_outputs=(loss,),
            output_grads=None,
            input_values=[x],
            weights=mod.parameters(),
        )
        # backward of loss with respect to weights
        stage_backward_weight(mod.parameters(), param_groups)
    # Run reference
    for ref_x in ref_inputs:
        ref_out = ref_mod(ref_x)
        ref_loss = loss_fn(ref_out, ref_target)
        ref_loss.backward()
    # Every rank checks gradients
    for name, p in mod.named_parameters():
        ref_p = ref_mod.get_parameter(name)
        try:
            torch.testing.assert_close(p.grad, ref_p.grad)
        except AssertionError:
            print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
            raise
|
import copy
from model_registry import MLPModule
import torch
from torch.distributed.pipelining._backward import (
stage_backward,
stage_backward_input,
stage_backward_weight,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
batch_size = 256
class StageBackwardTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_microbatch.py
|
test_split_and_merge
|
def test_split_and_merge(self):
    x0 = torch.randn(128, d_hid)
    x1 = torch.randn(256, d_hid)
    x2 = torch.randn(512, d_hid)
    args = (x0, x1, x2)
    kwargs = {"x0": x0, "x1": x1, "x2": x2}
    # Default chunking: dim 0
    arg_chunks, kwarg_chunks = split_args_kwargs_into_chunks(args, kwargs, 2)
    assert len(arg_chunks) == 2
    assert len(kwarg_chunks) == 2
    assert arg_chunks[0][0].shape == torch.Size([64, d_hid])
    assert arg_chunks[1][0].shape == torch.Size([64, d_hid])
    assert arg_chunks[0][1].shape == torch.Size([128, d_hid])
    assert arg_chunks[0][2].shape == torch.Size([256, d_hid])
    assert kwarg_chunks[0]["x0"].shape == torch.Size([64, d_hid])
    assert kwarg_chunks[0]["x1"].shape == torch.Size([128, d_hid])
    assert kwarg_chunks[1]["x2"].shape == torch.Size([256, d_hid])
    # Merge chunks back together
    merged_args = merge_chunks(
        arg_chunks,
        (TensorChunkSpec(0), TensorChunkSpec(0), TensorChunkSpec(0)),
    )
    torch.testing.assert_close(merged_args, args)
    merged_kwargs = merge_chunks(
        kwarg_chunks,
        {
            "x0": TensorChunkSpec(0),
            "x1": TensorChunkSpec(0),
            "x2": TensorChunkSpec(0),
        },
    )
    torch.testing.assert_close(merged_kwargs, kwargs)
    print("Microbatch test passed")
|
from model_registry import ModelWithKwargs
import torch
from torch.distributed.pipelining import pipeline
from torch.distributed.pipelining.microbatch import (
merge_chunks,
split_args_kwargs_into_chunks,
TensorChunkSpec,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
class MicrobatchTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_microbatch.py
|
test_chunk_spec
|
def test_chunk_spec(self):
    mod = ModelWithKwargs()
    batch_size = ModelWithKwargs.DEFAULT_BATCH_SIZE
    x = torch.randn(batch_size, d_hid)
    y = torch.randn(batch_size, d_hid)
    num_chunks = 4
    args_chunk_spec = TensorChunkSpec.from_tuple((0,))
    kwargs_chunk_spec = TensorChunkSpec.from_dict({"y": 0})
    args_split, kwargs_split = split_args_kwargs_into_chunks(
        (x,),
        {"y": y},
        num_chunks,
        args_chunk_spec,
        kwargs_chunk_spec,
    )
    pipe = pipeline(
        mod,
        mb_args=args_split[0],
        mb_kwargs=kwargs_split[0],
    )
    ref = mod(x, y)
    out = pipe(x, y)[0]
    torch.testing.assert_close(out, ref)
    print(f"equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}")
|
from model_registry import ModelWithKwargs
import torch
from torch.distributed.pipelining import pipeline
from torch.distributed.pipelining.microbatch import (
merge_chunks,
split_args_kwargs_into_chunks,
TensorChunkSpec,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
class MicrobatchTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_pipe.py
|
forward
|
def forward(self, x, y):
    x = torch.mm(x, self.mm_param1)  # multi-use param
    skip_connection = x
    x = x + y
    x = torch.relu(x)
    pipe_split()
    x = torch.mm(x, self.mm_param1)  # multi-use param
    x = self.lin1(x)
    pipe_split()
    x = torch.relu(x)
    x = x + skip_connection
    x = torch.mm(x, self.mm_param2)
    pipe_split()
    x = self.lin2(x)
    x = torch.relu(x)
    return x
|
from model_registry import MLPModule, ModelWithParamAlias
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
d_hid = 512
microbatch_size = 16
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_pipe.py
|
forward
|
def forward(self, x, y):
    x = torch.mm(x, self.mm_param1)  # multi-use param
    skip_connection = x
    x = x + y
    x = torch.relu(x)
    pipe_split()
    x = torch.mm(x, self.mm_param1)  # multi-use param
    x = self.lin1(x)
    pipe_split()
    x = torch.relu(x)
    x = x + skip_connection
    x = torch.mm(x, self.mm_param2)
    pipe_split()
    x = self.lin2(x)
    x = torch.relu(x)
    return x
|
from model_registry import MLPModule, ModelWithParamAlias
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
d_hid = 512
microbatch_size = 16
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_pipe.py
|
test_model_split
|
def test_model_split(self, ModelClass):
    mod = ModelClass()
    x = torch.randn(microbatch_size, d_hid)
    y = torch.randn(microbatch_size, d_hid)
    pipe = pipeline(
        mod,
        mb_args=(x, y),
    )
    assert (
        pipe.num_stages == EXPECTED_N_STAGES[ModelClass]
    ), f"nstages = {pipe.num_stages}, expect {EXPECTED_N_STAGES[ModelClass]}"
    ref_out = mod(x, y)
    out = pipe(x, y)[0]
    torch.testing.assert_close(out, ref_out)
    print(f"equivalence test passed {torch.sum(out)} ref {torch.sum(ref_out)}")
    # Check qualname
    # state_dict.keys include both parameters and persistent buffers
    old_names = set(mod.state_dict().keys())
    new_names = set()
    for idx in range(pipe.num_stages):
        stage_mod = pipe.get_stage_module(idx)
        stage_fqns = set(stage_mod.state_dict().keys())
        assert stage_fqns.issubset(old_names)
        new_names.update(stage_fqns)
    if CHECK_FQN_SET_EQUALITY:
        assert (
            old_names == new_names
        ), f"""
        old names {old_names}
        new names {new_names}
        """
    print("Qualname check passed")
|
from model_registry import MLPModule, ModelWithParamAlias
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
d_hid = 512
microbatch_size = 16
EXPECTED_N_STAGES = {
ExampleCode: 4,
MultiMLP: 4,
ModelWithParamAlias: 2,
}
CHECK_FQN_SET_EQUALITY = False
class PipeTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
__init__
|
def __init__(self, *args, **kwargs):
    # Mock the necessary attributes
    self.num_stages = kwargs.get("num_stages", 1)
    self.group_size = kwargs.get("group_size", 1)
    self.group_rank = kwargs.get("group_rank", 0)
    self.group = kwargs.get("group", None)
    self.stage_index_to_group_rank = kwargs.get("stage_index_to_group_rank", None)
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class MockPipelineStage(_PipelineStageBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
_create_grad_recv_info
|
def _create_grad_recv_info(self, *args, **kwargs):
    return None
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class MockPipelineStage(_PipelineStageBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
_prepare_forward_infra
|
def _prepare_forward_infra(self, n_microbatches):
    pass
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class MockPipelineStage(_PipelineStageBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule.py
|
_prepare_backward_infra
|
def _prepare_backward_infra(self, n_microbatches):
    pass
|
import logging
from typing import List
import torch
from torch.distributed.pipelining import (
ScheduleFlexibleInterleaved1F1B,
ScheduleInterleaved1F1B,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import (
_Action,
_add_send_recv,
_add_unshard_reshard,
_format_pipeline_order,
_PipelineSchedule,
_validate_pipeline_order,
B,
F,
get_schedule_class,
RECV_F,
RESHARD,
SEND_B,
UNSHARD,
W,
)
from torch.distributed.pipelining.stage import _PipelineStageBase
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
TestCase,
)
logger = logging.getLogger(__name__)
class MockPipelineStage(_PipelineStageBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
dw_builder
|
def dw_builder(self):
    """This simulates a function attached to a model with a custom backward.
    Each call to builder gives a new dw_runner that has some updated state to compute the latest dw.
    """

    def dw_runner():
        # This inner function would be called by PipelineStage during `backward_weight_one_chunk`
        print(f"dw called {self.i}th time")
        self.i += 1

    return dw_runner
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
class CustomState:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
dw_runner
|
def dw_runner():
    # This inner function would be called by PipelineStage during `backward_weight_one_chunk`
    print(f"dw called {self.i}th time")
    self.i += 1

return dw_runner
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
_run_step
|
def _run_step(x):
    if self.rank == 0:
        return schedule.step(x)
    else:
        return schedule.step()

out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
    ref_out = mod(x)
    torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
if self.rank == 0:
    # intended to run this code on all ranks, but the problem is if rank0 throws,
    # it won't perform the send that unblocks rank 1.
    # TODO(whc) can't test this until fixing args/kwargs issue
    # with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
    #     _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x.to(torch.int32))
    # output of stage's mlp layer will be flattened by this hook, the stage should err
    handle = stage.submod.register_forward_hook(get_flatten_hook())
    with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
        _run_step(x)
    handle.remove()
    stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
test_custom_dw_errors
|
def test_custom_dw_errors(self):
    """Tests expected errors are raised"""
    full_mod = MultiMLP(d_hid, n_layers=self.world_size)
    full_mod.to(self.device)
    stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
    x = torch.randn(batch_size, d_hid, device=self.device)
    target = torch.randn(batch_size, d_hid, device=self.device)
    stage_with_dw_builder = PipelineStage(
        stage_mod,
        self.rank,
        self.world_size,
        self.device,
        input_args=x.chunk(chunks)[0],
        dw_builder=lambda: None,
    )
    with self.assertRaisesRegex(AssertionError, "backward_one_chunk"):
        stage_with_dw_builder.backward_weight_one_chunk(bwd_chunk_id=0)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
class StageTest(MultiProcContinousTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_transformer.py
|
__init__
|
def __init__(self, d_hid):
    super().__init__()
    self.net1 = torch.nn.Linear(d_hid, d_hid)
    self.relu = torch.nn.ReLU()
    self.net2 = torch.nn.Linear(d_hid, d_hid)
|
import torch
from torch.distributed.pipelining import pipeline, SplitPoint
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 16
n_layers = 8
microbatch_size = 4
class MLPModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_transformer.py
|
forward
|
def forward(self, x):
    x = self.net1(x)
    x = self.relu(x)
    x = self.net2(x)
    return x
|
import torch
from torch.distributed.pipelining import pipeline, SplitPoint
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 16
n_layers = 8
microbatch_size = 4
class MLPModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_transformer.py
|
test_ir
|
def test_ir(self):
    transformer = TransformerLike()
    x = torch.randn(microbatch_size, d_hid)
    # Split into 2 stages
    num_stages = 2
    split_spec = {f"layers.{n_layers // num_stages}": SplitPoint.BEGINNING}
    pipe = pipeline(
        transformer,
        (x,),
        split_spec=split_spec,
    )
    assert pipe.num_stages == num_stages, f"{pipe.num_stages=}, expect {num_stages}"

    def get_layers(module):
        layers = [name for name, _ in module.layers.named_children()]
        return layers

    # Collect all layers in pipe
    layers = []
    for stage_idx in range(pipe.num_stages):
        stage_mod = pipe.get_stage_module(stage_idx)
        layers += get_layers(stage_mod)
    # Check layer completeness
    orig_layers = get_layers(transformer)
    assert sorted(layers) == sorted(orig_layers), f"{layers} != {orig_layers}"
    print("Layers matched!")
    # Check equivalence
    ref = transformer(x)
    out = pipe(x)[0]
    torch.testing.assert_close(out, ref)
    print(f"Equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}")
|
import torch
from torch.distributed.pipelining import pipeline, SplitPoint
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 16
n_layers = 8
microbatch_size = 4
class TransformerTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_transformer.py
|
get_layers
|
def get_layers(module):
    layers = [name for name, _ in module.layers.named_children()]
    return layers

# Collect all layers in pipe
layers = []
for stage_idx in range(pipe.num_stages):
    stage_mod = pipe.get_stage_module(stage_idx)
    layers += get_layers(stage_mod)
# Check layer completeness
orig_layers = get_layers(transformer)
assert sorted(layers) == sorted(orig_layers), f"{layers} != {orig_layers}"
print("Layers matched!")
# Check equivalence
ref = transformer(x)
out = pipe(x)[0]
torch.testing.assert_close(out, ref)
print(f"Equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}")
|
import torch
from torch.distributed.pipelining import pipeline, SplitPoint
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 16
n_layers = 8
microbatch_size = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_unflatten.py
|
test_unflatten
|
def test_unflatten(self):
    x = torch.randn(1, 16, 256, 256)
    constant = torch.ones(1, 16, 256, 256)
    mod = M()
    pipe = pipeline(
        mod,
        (x,),
        {"constant": constant},
    )
    assert pipe.num_stages == 4
    orig_state_dict = mod.state_dict()
    # Check qualnames
    for stage_idx in range(pipe.num_stages):
        stage_mod = pipe.get_stage_module(stage_idx)
        for param_name, param in stage_mod.named_parameters():
            assert (
                param_name in orig_state_dict
            ), f"{param_name} not in original state dict"
    print("Param qualname test passed")
    # Check equivalence
    ref = mod(x, constant)
    out = pipe(x, constant)[0]
    torch.testing.assert_close(out, ref)
    print(f"Equivalence test passed {torch.sum(out)} ref {torch.sum(ref)}")
|
import torch
from torch.distributed.pipelining import pipe_split, pipeline
from torch.testing._internal.common_utils import run_tests, TestCase
class UnflattenTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule_multiproc.py
|
test_schedule_with_weight_update_mlp_e2e
|
def test_schedule_with_weight_update_mlp_e2e(self, ScheduleClass):
    stages_per_rank = 2
    n_stages = stages_per_rank * self.world_size
    full_mod = MultiMLPWithDw(d_hid, n_layers=n_stages)
    full_mod.to(self.device)
    ref_mod = copy.deepcopy(full_mod)
    x = torch.randn(batch_size, d_hid, device=self.device)
    with torch.no_grad():
        y = ref_mod(x)
        # Add a small perturbation
        target = y + torch.randn(batch_size, d_hid, device=self.device)
    ref_loss_fn = torch.nn.MSELoss(reduction="sum")
    full_loss_fn = torch.nn.MSELoss(reduction="sum")
    full_mod.toggle()
    # Get a submodule, e.g. `layers.0` or `layers.1`
    stage_indices = [
        self.rank + i * self.world_size for i in range(stages_per_rank)
    ]
    submod_names = [f"layers.{i}" for i in stage_indices]
    stage_modules = [
        full_mod.get_submodule(submod_name) for submod_name in submod_names
    ]
    # Run reference
    for _ in range(2):
        ref_stage_modules = [
            ref_mod.get_submodule(submod_name) for submod_name in submod_names
        ]
        for stage_module in ref_stage_modules:
            stage_module.zero_grad()
        ref_mod.zero_grad()
        ref_out = ref_mod(x)
        ref_loss = ref_loss_fn(ref_out, target)
        ref_loss.backward()

    class CustomState:
        def __init__(self, stage_module, stage_idx, rank):
            self.i = 0
            self.stage_module = stage_module
            self.stage_idx = stage_idx
            self.rank = rank

        def dw_builder(self):
            def dw_runner():
                # This inner function would be called by PipelineStage during `backward_weight_one_chunk`
                self.i += 1
                print(
                    f"[Rank {self.rank}] dw_count={self.i} stage={self.stage_idx}"
                )
                self.stage_module.compute_dW()

            return dw_runner

    cs = {}
    for stage_module, stage_idx in zip(stage_modules, stage_indices):
        cs[stage_idx] = CustomState(stage_module, stage_idx, self.rank)
    # Create a pipeline stage to wrap that submodule
    chunks = 2
    input_args = x.chunk(chunks)[0]
    stages = [
        PipelineStage(
            stage_module,
            stage_idx,
            n_stages,
            self.device,
            input_args=input_args,
            dw_builder=cs[stage_idx].dw_builder,
        )
        for stage_module, stage_idx in zip(stage_modules, stage_indices)
    ]
    # Attach to a schedule
    schedule = ScheduleClass(
        stages, chunks, loss_fn=full_loss_fn, enable_zero_bubble=True
    )
    for _ in range(2):
        # Zero gradients
        for stage_module in stage_modules:
            stage_module.zero_grad()
        if self.rank == 0:
            schedule.step(x)
        elif self.rank == self.world_size - 1:
            losses = []
            out = schedule.step(target=target, losses=losses)
        else:
            schedule.step()
    dist.barrier()
    # Last rank checks result
    if self.rank == self.world_size - 1:
        # Check output
        torch.testing.assert_close(out, ref_out)
        # Check loss
        # Since the reduction used in the loss function above is "sum", we use
        # "sum" here to reduce microbatch losses into a single value too.
        pipe_loss = sum(losses)
        torch.testing.assert_close(pipe_loss, ref_loss)
    # Every rank checks gradients
    for stage_module, submod_name in zip(stage_modules, submod_names):
        # Get corresponding submodule from reference model
        ref_submod = ref_mod.get_submodule(submod_name)
        # Check gradients per parameter
        for name, p in stage_module.named_parameters():
            ref_p = ref_submod.get_parameter(name)
            torch.testing.assert_close(p.grad, ref_p.grad, rtol=1e-5, atol=4e-5)
|
import copy
import logging
import os
import sys
import tempfile
from model_registry import ModelWithKwargs, MultiMLP, MultiMLPWithDw
from schedule_registry import ScheduleUnbalanced, ScheduleVShaped, ScheduleWithW
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
_ScheduleForwardOnly,
pipeline,
PipelineStage,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import _PipelineScheduleRuntime
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
logger = logging.getLogger(__name__)
d_hid = 512
batch_size = 256
class ScheduleTest(MultiProcContinousTest):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule_multiproc.py
|
__init__
|
def __init__(self, stage_module, stage_idx, rank):
    self.i = 0
    self.stage_module = stage_module
    self.stage_idx = stage_idx
    self.rank = rank
|
import copy
import logging
import os
import sys
import tempfile
from model_registry import ModelWithKwargs, MultiMLP, MultiMLPWithDw
from schedule_registry import ScheduleUnbalanced, ScheduleVShaped, ScheduleWithW
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
_ScheduleForwardOnly,
pipeline,
PipelineStage,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import _PipelineScheduleRuntime
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
logger = logging.getLogger(__name__)
d_hid = 512
batch_size = 256
class CustomState:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_schedule_multiproc.py
|
dw_runner
|
def dw_runner():
    # This inner function would be called by PipelineStage during `backward_weight_one_chunk`
    self.i += 1
    print(
        f"[Rank {self.rank}] dw_count={self.i} stage={self.stage_idx}"
    )
    self.stage_module.compute_dW()

return dw_runner
|
import copy
import logging
import os
import sys
import tempfile
from model_registry import ModelWithKwargs, MultiMLP, MultiMLPWithDw
from schedule_registry import ScheduleUnbalanced, ScheduleVShaped, ScheduleWithW
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
_ScheduleForwardOnly,
pipeline,
PipelineStage,
Schedule1F1B,
ScheduleFlexibleInterleaved1F1B,
ScheduleGPipe,
ScheduleInterleaved1F1B,
ScheduleInterleavedZeroBubble,
ScheduleLoopedBFS,
)
from torch.distributed.pipelining.schedules import _PipelineScheduleRuntime
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
logger = logging.getLogger(__name__)
d_hid = 512
batch_size = 256
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
get_dtype_change_hook
|
def get_dtype_change_hook(new_dtype):
    """A simple hook for simulating mixed precision"""

    def dtype_change_hook(module, input, output):
        def f(x):
            return x.to(new_dtype)

        return tree_map_only(torch.Tensor, f, output)

    return dtype_change_hook
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_default_stream_cuda
|
def test_default_stream_cuda(self):
    stream = default_stream(torch.device("cuda"))
    assert isinstance(stream, torch.cuda.Stream)
    assert stream == torch.cuda.default_stream()
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestDefaultStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_use_device_cuda
|
def test_use_device_cuda(self):
    with use_device(torch.device("cuda")):
        pass
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestUseDevice:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_use_stream_cuda
|
def test_use_stream_cuda(self):
    stream = new_stream(torch.device("cuda"))
    with use_stream(stream):
        assert current_stream(torch.device("cuda")) == stream
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestUseStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_get_device_cuda
|
def test_get_device_cuda(self):
    stream = current_stream(torch.device("cuda"))
    assert get_device(stream).type == "cuda"
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestGetDevice:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
_test_wait_stream
|
def _test_wait_stream(self, source, target, cuda_sleep=None):
    with use_stream(target):
        if is_cuda(target):
            cuda_sleep(0.5)
        x = torch.ones(100, 100, device=get_device(target))
    wait_stream(source, target)
    with use_stream(source):
        assert x.sum().item() == 10000
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestWaitStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
f
|
def f(x):
    return x.to(new_dtype)

return tree_map_only(torch.Tensor, f, output)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
get_flatten_hook
|
def get_flatten_hook():
    """A simple hook for simulating wrong model output shape"""

    def flatten_hook(module, input, output):
        def f(x):
            return x.flatten()

        return tree_map_only(torch.Tensor, f, output)

    return flatten_hook


class StageTest(MultiProcContinousTest):
    @classmethod
    def backend_str(cls) -> str:
        # Testing with NCCL backend
        return "nccl"

    @classmethod
    def setUpClass(cls):
        """
        Class-scope test fixture. Run once for entire test class, before any test starts.
        Set up the device.
        """
        super().setUpClass()
        dev_id = cls.rank % torch.cuda.device_count()
        cls.device = torch.device(f"cuda:{dev_id}")

    @requires_nccl()
    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
    @parametrize("ModelClass", [ExampleCode, MultiMLP])
    def test_tracer(self, ModelClass):
        mod = ModelClass(d_hid)
        mod.to(self.device)
        x = torch.randn(batch_size, d_hid, device=self.device)
        x_mb = x.chunk(chunks)[0]
        split_spec = mod.split_spec if hasattr(mod, "split_spec") else None
        pipe = pipeline(
            mod,
            mb_args=(x_mb,),
            split_spec=split_spec,
        )
        stage = pipe.build_stage(
            self.rank,
            self.device,
        )
        # Attach to a schedule
        schedule = ScheduleGPipe(stage, chunks)

        # Run
        def _run_step(x):
            if self.rank == 0:
                return schedule.step(x)
            else:
                return schedule.step()

        out = _run_step(x)
        # Last rank checks result
        if self.rank == self.world_size - 1:
            ref_out = mod(x)
            torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
        # Test qualname mapping
        submod_keys = stage.submod.state_dict().keys()
        # Confirm keys are consistent with original model
        old_keys = mod.state_dict().keys()
        assert all(k in old_keys for k in submod_keys)
        if self.rank == 0:
            # intended to run this code on all ranks, but the problem is if rank0 throws,
            # it won't perform the send that unblocks rank 1.
            # TODO(whc) can't test this until fixing args/kwargs issue
            # with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
            #     _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x.to(torch.int32))
            # output of stage's mlp layer will be flattened by this hook, the stage should err
            handle = stage.submod.register_forward_hook(get_flatten_hook())
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(x)
            handle.remove()
            stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x)

    @requires_nccl()
    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
    @parametrize("ModelClass", [ModelWithKwargs])
    def test_tracer_kwargs(self, ModelClass):
        mod = ModelClass(d_hid)
        mod.to(self.device)
        x = torch.randn(batch_size, d_hid, device=self.device)
        y = torch.randn(batch_size, d_hid, device=self.device)
        x_mb = x.chunk(chunks)[0]
        y_mb = y.chunk(chunks)[0]
        pipe = pipeline(
            mod,
            mb_args=(x_mb,),
            mb_kwargs={"y": y_mb},
        )
        stage_mod = pipe.get_stage_module(self.rank)
        # Test build_stage
        stage = build_stage(
            stage_mod,
            self.rank,
            pipe.info(),
            self.device,
        )
        # Attach to a schedule
        schedule = ScheduleGPipe(stage, chunks)

        # Run
        def _run_step(x):
            if self.rank == 0:
                return schedule.step(x, y=y)
            else:
                return schedule.step()

        # Last rank checks result
        out = _run_step(x)
        if self.rank == self.world_size - 1:
            ref_out = mod(x, y=y)
            torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
        # Test qualname mapping
        submod_keys = stage.submod.state_dict().keys()
        # Confirm keys are consistent with original model
        old_keys = mod.state_dict().keys()
        assert all(k in old_keys for k in submod_keys)
        if self.rank == 0:
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x.to(torch.int32))
            # output of stage's mlp layer will be flattened by this hook, the stage should err
            handle = stage.submod.register_forward_hook(get_flatten_hook())
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(x)
            handle.remove()
            stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x)

    @requires_nccl()
    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
    def test_manual(self):
        full_mod = MultiMLP(d_hid, n_layers=self.world_size)
        full_mod.to(self.device)
        stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
        x = torch.randn(batch_size, d_hid, device=self.device)
        stage = PipelineStage(
            stage_mod,
            self.rank,
            self.world_size,
            self.device,
            input_args=x.chunk(chunks)[0],
        )
        # Attach to a schedule
        schedule = ScheduleGPipe(stage, chunks)

        # Run
        def _run_step(x):
            if self.rank == 0:
                return schedule.step(x)
            else:
                return schedule.step()

        out = _run_step(x)
        # Last rank checks result
        if self.rank == self.world_size - 1:
            ref_out = full_mod(x)
            torch.testing.assert_close(out, ref_out)
        if self.rank == 0:
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x.to(torch.int32))
            # output of stage's mlp layer will be flattened by this hook, the stage should err
            handle = stage_mod.register_forward_hook(get_flatten_hook())
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(x)
            handle.remove()
            stage_mod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
            with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
                _run_step(x)

    @requires_nccl()
    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
    def test_custom_dw_with_fb_schedule(self):
        """Tests that separate weight grad function 'dw_runner' gets run under a schedule that's only aware of F/B."""
        full_mod = MultiMLP(d_hid, n_layers=self.world_size)
        full_mod.to(self.device)
        stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
        x = torch.randn(batch_size, d_hid, device=self.device)
        target = torch.randn(batch_size, d_hid, device=self.device)

        class CustomState:
            def __init__(self) -> None:
                self.i = 0

            def dw_builder(self):
                """This simulates a function attached to a model with a custom backward.
                Each call to builder gives a new dw_runner that has some updated state to compute the latest dw.
                """

                def dw_runner():
                    # This inner function would be called by PipelineStage during `backward_weight_one_chunk`
                    print(f"dw called {self.i}th time")
                    self.i += 1

                return dw_runner

        cs = CustomState()
        stage = PipelineStage(
            stage_mod,
            self.rank,
            self.world_size,
            self.device,
            input_args=x.chunk(chunks)[0],
            dw_builder=cs.dw_builder,
        )
        # Attach to a schedule
        schedule = ScheduleGPipe(
            stage, chunks, loss_fn=torch.nn.MSELoss(reduction="sum")
        )

        # Run
        def _run_step(x):
            if self.rank == 0:
                return schedule.step(x)
            elif self.rank == self.world_size - 1:
                return schedule.step(target=target)
            else:
                return schedule.step()

        out = _run_step(x)
        self.assertEqual(cs.i, chunks)
        # Last rank checks result
        if self.rank == self.world_size - 1:
            ref_out = full_mod(x)
            torch.testing.assert_close(out, ref_out)
        if self.rank == 0:
            with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
                _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))

    @requires_nccl()
    @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs")
    def test_custom_dw_errors(self):
        """Tests expected errors are raised"""
        full_mod = MultiMLP(d_hid, n_layers=self.world_size)
        full_mod.to(self.device)
        stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
        x = torch.randn(batch_size, d_hid, device=self.device)
        target = torch.randn(batch_size, d_hid, device=self.device)
        stage_with_dw_builder = PipelineStage(
            stage_mod,
            self.rank,
            self.world_size,
            self.device,
            input_args=x.chunk(chunks)[0],
            dw_builder=lambda: None,
        )
        with self.assertRaisesRegex(AssertionError, "backward_one_chunk"):
            stage_with_dw_builder.backward_weight_one_chunk(bwd_chunk_id=0)


instantiate_parametrized_tests(StageTest)

if __name__ == "__main__":
    # Check if GPU and NCCL are available
    if not (
        dist.is_available()
        and dist.is_nccl_available()
        and torch.cuda.device_count() > 1
    ):
        print(
            "c10d NCCL not available or not enough GPUs, skipping tests",
            file=sys.stderr,
        )
        sys.exit(0)

    rank = int(os.getenv("RANK", -1))
    world_size = int(os.getenv("WORLD_SIZE", 2))

    if rank != -1:
        # Launched with torchrun or other multi-proc launchers. Directly run the test.
        StageTest.run_rank(rank, world_size)
    else:
        # Launched as a single process. Spawn subprocess to run the tests.
        # Also need a rendezvous file for `init_process_group` purpose.
        rdvz_file = tempfile.NamedTemporaryFile(delete=False).name
        torch.multiprocessing.spawn(
            StageTest.run_rank,
            nprocs=world_size,
            args=(world_size, rdvz_file),
        )
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
f
|
def f(x):
    return x.to(new_dtype)

return tree_map_only(torch.Tensor, f, output)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
_run_step
|
def _run_step(x):
    if self.rank == 0:
        return schedule.step(x)
    else:
        return schedule.step()

out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
    ref_out = mod(x)
    torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
if self.rank == 0:
    # intended to run this code on all ranks, but the problem is if rank0 throws,
    # it won't perform the send that unblocks rank 1.
    # TODO(whc) can't test this until fixing args/kwargs issue
    # with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
    #     _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x.to(torch.int32))
    # output of stage's mlp layer will be flattened by this hook, the stage should err
    handle = stage.submod.register_forward_hook(get_flatten_hook())
    with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
        _run_step(x)
    handle.remove()
    stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
_run_step
|
def _run_step(x):
    if self.rank == 0:
        return schedule.step(x)
    else:
        return schedule.step()

out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
    ref_out = mod(x)
    torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
if self.rank == 0:
    # intended to run this code on all ranks, but the problem is if rank0 throws,
    # it won't perform the send that unblocks rank 1.
    # TODO(whc) can't test this until fixing args/kwargs issue
    # with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
    #     _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x.to(torch.int32))
    # output of stage's mlp layer will be flattened by this hook, the stage should err
    handle = stage.submod.register_forward_hook(get_flatten_hook())
    with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
        _run_step(x)
    handle.remove()
    stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
    with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
        _run_step(x)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_stage.py
|
_run_step
|
def _run_step(x):
if self.rank == 0:
return schedule.step(x)
else:
return schedule.step()
out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
ref_out = mod(x)
torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
if self.rank == 0:
# intended to run this code on all ranks, but the problem is if rank0 throws,
# it won't perform the send that unblocks rank 1.
# TODO(whc) can't test this until fixing args/kwargs issue
# with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
# _run_step(torch.randn(batch_size + 1, d_hid, device=self.device))
with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
_run_step(x.to(torch.int32))
# output of stage's mlp layer will be flattened by this hook, the stage should err
handle = stage.submod.register_forward_hook(get_flatten_hook())
with self.assertRaisesRegex(PipeliningShapeError, "shape mismatch"):
_run_step(x)
handle.remove()
stage.submod.register_forward_hook(get_dtype_change_hook(torch.bfloat16))
with self.assertRaisesRegex(PipeliningShapeError, "dtype mismatch"):
_run_step(x)
|
import os
import sys
import tempfile
from model_registry import ExampleCode, ModelWithKwargs, MultiMLP
import torch
import torch.distributed as dist
from torch.distributed.pipelining import (
build_stage,
pipeline,
PipelineStage,
ScheduleGPipe,
)
from torch.distributed.pipelining._utils import PipeliningShapeError
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_distributed import (
MultiProcContinousTest,
requires_nccl,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
skip_but_pass_in_sandcastle_if,
)
from torch.utils._pytree import tree_map_only
d_hid = 512
batch_size = 256
chunks = 4
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_micro_pipeline_tp.py
|
_fp8_all_gather
|
def _fp8_all_gather(tensor: torch.Tensor, gather_dim: int, group_name: str):
# We don't yet have a canonical pattern for fp8 all-gather. This is a
# pattern observed in DTensor + float8_experimental.
ag = all_gather_tensor(tensor, gather_dim=0, group=group_name)
if gather_dim == 0:
return ag.view(tensor.dtype)
chunks = ag.chunk(_get_group_size_by_name(group_name))
chunks = [chunk.view(torch.uint8) for chunk in chunks]
return torch.cat(chunks, dim=gather_dim).view(tensor.dtype)
@instantiate_parametrized_tests
class MicroPipelineTPTest(TestCase):
def setUp(self):
torch._inductor.config._micro_pipeline_tp = True
self.rank = 0
self.world_size = 2
torch.cuda.set_device("cuda:0")
store = FakeStore()
dist.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
def tearDown(self):
dist.destroy_process_group()
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_find_all_gather_patterns(self):
group = dist.group.WORLD
def func(inp: torch.Tensor) -> torch.Tensor:
a = all_gather_tensor(inp, gather_dim=0, group=group.group_name)
b = all_gather_tensor(inp, gather_dim=1, group=group.group_name)
c = _fp8_all_gather(inp, gather_dim=0, group_name=group.group_name)
d = _fp8_all_gather(inp, gather_dim=1, group_name=group.group_name)
return a, b, c
inp = torch.rand(64, 32, device="cuda")
gm = _make_post_grad_fx(func, inp)
all_gathers = find_all_gather_patterns(gm.graph)
self.assertEqual(len(all_gathers), 4)
# If this test fails, please update find_all_gather_patterns instead of
# modifying the following assertions.
for all_gather in all_gathers:
self.assertEqual(
all_gather.ag_node.target,
torch.ops._c10d_functional.all_gather_into_tensor.default,
)
self.assertEqual(all_gather.group_name, group.group_name)
self.assertEqual(all_gathers[0].gather_dim, 0)
self.assertEqual(
all_gathers[0].res_node.target,
torch.ops._c10d_functional.wait_tensor.default,
)
self.assertEqual(all_gathers[1].gather_dim, 1)
self.assertEqual(
all_gathers[1].res_node.target,
torch.ops.aten.cat.default,
)
self.assertEqual(all_gathers[2].gather_dim, 0)
self.assertEqual(
all_gathers[2].res_node.target,
torch.ops.aten.view.dtype,
)
self.assertEqual(all_gathers[3].gather_dim, 1)
self.assertEqual(
all_gathers[3].res_node.target,
torch.ops.aten.view.dtype,
)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_find_reduce_scatter_patterns(self):
group = dist.group.WORLD
def func(inp: torch.Tensor) -> torch.Tensor:
a = reduce_scatter_tensor(inp, "sum", scatter_dim=0, group=group.group_name)
b = reduce_scatter_tensor(inp, "avg", scatter_dim=1, group=group.group_name)
return a, b
inp = torch.rand(64, 32, device="cuda")
gm = make_fx(func)(inp)
reduce_scatters = find_reduce_scatter_patterns(gm.graph)
self.assertEqual(len(reduce_scatters), 2)
# If this test fails, please update find_reduce_scatter_patterns
# instead of modifying the following assertions.
for reduce_scatter in reduce_scatters:
self.assertEqual(
reduce_scatter.input_node.op,
"placeholder",
)
self.assertEqual(
reduce_scatter.rs_node.target,
torch.ops._c10d_functional.reduce_scatter_tensor.default,
)
self.assertEqual(
reduce_scatter.res_node.target,
torch.ops._c10d_functional.wait_tensor.default,
)
self.assertEqual(reduce_scatter.group_name, group.group_name)
self.assertEqual(reduce_scatters[0].reduce_op, "sum")
self.assertEqual(reduce_scatters[0].scatter_dim, 0)
self.assertEqual(reduce_scatters[1].reduce_op, "avg")
self.assertEqual(reduce_scatters[1].scatter_dim, 1)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
def test_get_unexposed_collectives(self):
group = dist.group.WORLD
def func(inp: torch.Tensor) -> torch.Tensor:
a = inp @ inp.T
# b is unexposed (hidden by a)
b = all_gather_tensor(inp, gather_dim=0, group=group.group_name)
c = b @ inp.T
# d is unexposed (hidden by c)
d = reduce_scatter_tensor(b, "avg", scatter_dim=0, group=group.group_name)
# e is exposed
e = all_gather_tensor(d, gather_dim=0, group=group.group_name)
return a, c, e
inp = torch.rand(64, 32, device="cuda")
gm = make_fx(func)(inp)
overlappable_collectives = _get_unexposed_collectives(gm.graph)
self.assertEqual(
list(map(str, overlappable_collectives)),
["all_gather_into_tensor", "reduce_scatter_tensor"],
)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("gather_dim", [0, 1, 2])
@fresh_inductor_cache()
def test_fuse_all_gather_matmul(self, A_dims, gather_dim):
if gather_dim >= A_dims:
return
group = dist.group.WORLD
def func(A_shard: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
A = all_gather_tensor(A_shard, gather_dim=gather_dim, group=group)
return A @ B
if A_dims == 2:
A_shard_shape = [64, 32]
elif A_dims == 3:
A_shard_shape = [2, 64, 32]
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
A_shard_shape[gather_dim] //= self.world_size
A_shard = torch.rand(*A_shard_shape, device="cuda")
B = torch.rand(32, 16, device="cuda")
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, A_shard, B)
if gather_dim == A_dims - 1:
self.assertNotIn("fused_all_gather_matmul", code)
self.assertIn("all_gather_into_tensor", code)
else:
# Decomposing the matmul on the K dimension is not supported
self.assertIn("fused_all_gather_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("gather_dim", [0, 1, 2])
@fresh_inductor_cache()
def test_fuse_all_gather_scaled_matmul(self, A_dims, gather_dim):
if gather_dim >= A_dims:
return
group = dist.group.WORLD
def func(
A_shard: torch.Tensor,
B: torch.Tensor,
A_scale: torch.Tensor,
B_scale: torch.Tensor,
out_dtype: Optional[torch.dtype],
) -> torch.Tensor:
A = _fp8_all_gather(
A_shard, gather_dim=gather_dim, group_name=group.group_name
)
if len(A_shard.shape) > 2:
C = torch._scaled_mm(
A.flatten(0, -2), B, A_scale, B_scale, out_dtype=out_dtype
)
return C.view(*A.shape[:-1], -1)
else:
return torch._scaled_mm(A, B, A_scale, B_scale, out_dtype=out_dtype)
if A_dims == 2:
A_shard_shape = [64, 32]
elif A_dims == 3:
A_shard_shape = [2, 64, 32]
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
A_shard_shape[gather_dim] //= self.world_size
A_shard = torch.rand(*A_shard_shape, device="cuda").to(torch.float8_e4m3fn)
B = torch.rand(16, 32, device="cuda").to(torch.float8_e4m3fn).T
A_scale = torch.tensor(0.1, device="cuda")
B_scale = torch.tensor(0.1, device="cuda")
gm = _make_post_grad_fx(func, A_shard, B, A_scale, B_scale, torch.bfloat16)
with _test_mode():
micro_pipeline_tp_pass(gm.graph)
if gather_dim == A_dims - 1:
self.assertNotIn("fused_all_gather_scaled_matmul", str(gm.graph))
self.assertIn("all_gather_into_tensor", str(gm.graph))
else:
# Decomposing the matmul on the K dimension is not supported
self.assertIn("fused_all_gather_scaled_matmul", str(gm.graph))
self.assertNotIn("all_gather_into_tensor", str(gm.graph))
if torch.cuda.get_device_capability() < (8, 9):
return
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(
compiled, A_shard, B, A_scale, B_scale, torch.bfloat16
)
if gather_dim == A_dims - 1:
self.assertNotIn("fused_all_gather_scaled_matmul", code)
self.assertIn("all_gather_into_tensor", code)
else:
# Decomposing the matmul on the K dimension is not supported
self.assertIn("fused_all_gather_scaled_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("scatter_dim", [0, 1, 2])
@fresh_inductor_cache()
def test_fuse_matmul_reduce_scatter(self, A_dims, scatter_dim):
if scatter_dim >= A_dims:
return
group = dist.group.WORLD
def func(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
return reduce_scatter_tensor(A @ B, "avg", scatter_dim, group)
if A_dims == 2:
A = torch.rand(64, 32, device="cuda")
elif A_dims == 3:
A = torch.rand(2, 64, 32, device="cuda")
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
B = torch.rand(32, 16, device="cuda")
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, A, B)
self.assertIn("fused_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("scatter_dim", [0, 1, 2])
@fresh_inductor_cache()
def test_fuse_scaled_matmul_reduce_scatter(self, A_dims, scatter_dim):
if scatter_dim >= A_dims:
return
group = dist.group.WORLD
def func(
A: torch.Tensor,
B: torch.Tensor,
A_scale: torch.Tensor,
B_scale: torch.Tensor,
out_dtype: torch.dtype,
) -> torch.Tensor:
if len(A.shape) > 2:
C = torch._scaled_mm(
A.flatten(0, -2), B, A_scale, B_scale, out_dtype=out_dtype
)
C = C.view(*A.shape[:-1], B.shape[1])
else:
C = torch._scaled_mm(A, B, A_scale, B_scale, out_dtype=out_dtype)
return reduce_scatter_tensor(C, "avg", scatter_dim, group)
if A_dims == 2:
A = torch.rand(64, 32, device="cuda").to(torch.float8_e4m3fn)
elif A_dims == 3:
A = torch.rand(2, 64, 32, device="cuda").to(torch.float8_e4m3fn)
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
B = torch.rand(16, 32, device="cuda").to(torch.float8_e4m3fn).T
A_scale = torch.tensor(0.1, device="cuda")
B_scale = torch.tensor(0.1, device="cuda")
gm = _make_post_grad_fx(func, A, B, A_scale, B_scale, torch.bfloat16)
with _test_mode():
micro_pipeline_tp_pass(gm.graph)
self.assertIn("fused_scaled_matmul_reduce_scatter", str(gm.graph))
self.assertNotIn("reduce_scatter_tensor", str(gm.graph))
if torch.cuda.get_device_capability() < (8, 9):
return
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(
compiled, A, B, A_scale, B_scale, torch.bfloat16
)
self.assertIn("fused_scaled_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@parametrize("shard_dim", [0, 1])
@fresh_inductor_cache()
def test_dtensor_seq_par(self, shard_dim: int):
model = MLPModule(device="cuda", bias=False)
device_mesh = DeviceMesh(
"cuda",
torch.arange(0, self.world_size),
)
parallelize_plan = {
"net1": ColwiseParallel(input_layouts=Shard(shard_dim)),
"net2": RowwiseParallel(output_layouts=Shard(shard_dim)),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
if shard_dim == 0:
inp = torch.rand(8, 10, device="cuda")
elif shard_dim == 1:
inp = torch.rand(2, 8, 10, device="cuda")
else:
raise AssertionError("Invalid shard_dim")
with _test_mode():
compiled = torch.compile(model)
code = run_and_get_triton_code(compiled, inp)
self.assertIn("fused_all_gather_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
self.assertIn("fused_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
if __name__ == "__main__":
run_tests()
|
import unittest
from typing import Optional
import torch
import torch.distributed as dist
from functorch import make_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.fx_passes.micro_pipeline_tp import (
_get_unexposed_collectives,
find_all_gather_patterns,
find_reduce_scatter_patterns,
micro_pipeline_tp_pass,
)
from torch._inductor.fx_passes.post_grad import remove_noop_ops, view_to_reshape
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_tensor,
reduce_scatter_tensor,
)
from torch.distributed._symmetric_memory import _test_mode
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.distributed_c10d import _get_group_size_by_name
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
instantiate_parametrized_tests,
MI300_ARCH,
parametrize,
run_tests,
runOnRocmArch,
TestCase,
)
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
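The setUp in the record above relies on the single-process "fake" backend: collectives are captured for tracing without launching real workers. A minimal standalone sketch of that workflow, with an illustrative traced function and sizes:
import torch
import torch.distributed as dist
from functorch import make_fx
from torch.distributed._functional_collectives import all_gather_tensor
from torch.testing._internal.distributed.fake_pg import FakeStore

dist.init_process_group(backend="fake", world_size=2, rank=0, store=FakeStore())
group = dist.group.WORLD

def func(inp: torch.Tensor):
    # all_gather along dim 0, then a matmul that consumes the gathered tensor
    ag = all_gather_tensor(inp, gather_dim=0, group=group.group_name)
    return ag @ inp.T

# Trace with fake tensors; the resulting graph contains the
# all_gather_into_tensor / wait_tensor nodes that the micro-pipeline TP pass
# pattern-matches on.
gm = make_fx(func, tracing_mode="fake")(torch.rand(8, 4))
print(gm.graph)

dist.destroy_process_group()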
||
torch
|
test/distributed/tensor/parallel/test_micro_pipeline_tp.py
|
setUp
|
def setUp(self):
torch._inductor.config._micro_pipeline_tp = True
self.rank = 0
self.world_size = 2
torch.cuda.set_device("cuda:0")
store = FakeStore()
dist.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
|
import unittest
from typing import Optional
import torch
import torch.distributed as dist
from functorch import make_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.fx_passes.micro_pipeline_tp import (
_get_unexposed_collectives,
find_all_gather_patterns,
find_reduce_scatter_patterns,
micro_pipeline_tp_pass,
)
from torch._inductor.fx_passes.post_grad import remove_noop_ops, view_to_reshape
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_tensor,
reduce_scatter_tensor,
)
from torch.distributed._symmetric_memory import _test_mode
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.distributed_c10d import _get_group_size_by_name
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
instantiate_parametrized_tests,
MI300_ARCH,
parametrize,
run_tests,
runOnRocmArch,
TestCase,
)
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
@instantiate_parametrized_tests
class MicroPipelineTPTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_2d_parallel.py
|
__init__
|
def __init__(self):
super().__init__()
self.net1 = torch.nn.Linear(5, 8)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(8, 4)
self.net3 = torch.nn.Linear(4, 12)
|
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._tensor import DeviceMesh, DTensor as DT, Replicate
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class SimpleModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/tensor/parallel/test_2d_parallel.py
|
forward
|
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
|
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._tensor import DeviceMesh, DTensor as DT, Replicate
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class SimpleModel(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
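The forward above and the __init__ from the preceding record assemble into a small runnable module; a self-contained sketch:
import torch
import torch.nn.functional as F

class SimpleModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.net1 = torch.nn.Linear(5, 8)
        self.relu = torch.nn.ReLU()
        self.net2 = torch.nn.Linear(8, 4)
        self.net3 = torch.nn.Linear(4, 12)

    def forward(self, x):
        x = F.relu(self.net1(x))
        x = F.relu(self.net2(x))
        x = F.relu(self.net3(x))
        return x

# A (2, 5) batch flows through the three linear layers to a (2, 12) output.
print(SimpleModel()(torch.rand(2, 5)).shape)  # torch.Size([2, 12])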
||
torch
|
test/distributed/tensor/parallel/test_2d_parallel.py
|
init_model
|
def init_model(model_parallel_size=TP_DEGREE, use_orig_params=False, fsdp_nested=False):
rank = dist.get_rank()
torch.cuda.set_device(rank)
world_size = dist.get_world_size()
model = SimpleModel().cuda(rank)
# 2-D mesh is [dp, tp]
twod_mesh = DeviceMesh(
device_type="cuda",
mesh=torch.arange(0, world_size).view(model_parallel_size, -1),
)
fsdp_pg = twod_mesh.get_dim_groups()[0]
# Create Input
model = _distribute_and_fsdp_wrap_module(
model, True, twod_mesh, fsdp_pg, use_orig_params, fsdp_nested
)
return model, fsdp_pg
|
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._tensor import DeviceMesh, DTensor as DT, Replicate
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
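The deleted init_model helper above builds its 2-D mesh by reshaping torch.arange(world_size). A tiny local illustration (plain tensor arithmetic, no process group needed; the sizes are illustrative) of how that reshape assigns each rank a coordinate along the two mesh dimensions:
import torch

world_size, model_parallel_size = 8, 2
mesh = torch.arange(0, world_size).view(model_parallel_size, -1)
print(mesh)
# tensor([[0, 1, 2, 3],
#         [4, 5, 6, 7]])

rank = 6
dim0, dim1 = (mesh == rank).nonzero()[0].tolist()
print(dim0, dim1)  # 1 2 -> rank 6 sits in row 1, column 2 of the mesh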
||
torch
|
test/distributed/tensor/parallel/test_2d_parallel.py
|
_compare_params
|
def _compare_params(self, m1, m2):
with FSDP.summon_full_params(m1):
with FSDP.summon_full_params(m2):
for n_p1, n_p2 in zip(m1.named_parameters(), m2.named_parameters()):
p1 = n_p1[1]
p2 = n_p2[1]
self.assertEqual(n_p1[0], n_p2[0])
name = n_p1[0]
if name == "net2.bias" and self.rank != 0:
continue
if type(p2) is DT:
p2 = p2.redistribute(p2.device_mesh, [Replicate()]).to_local()
self.assertTrue(torch.allclose(p1, p2), f"{p1} vs {p2}")
|
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._tensor import DeviceMesh, DTensor as DT, Replicate
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class Test2dParallelIntegration(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/tensor/parallel/test_2d_parallel.py
|
_clean_up_fsdp_param_name
|
def _clean_up_fsdp_param_name(self, name):
return ".".join(
filter(lambda name: name != FSDP_WRAPPED_MODULE, name.split("."))
)
|
from typing import Any
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
import torch.nn.functional as F
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed._tensor import DeviceMesh, DTensor as DT, Replicate
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
from torch.distributed.tensor.parallel import PairwiseParallel, parallelize_module
from torch.distributed.tensor.parallel.fsdp import enable_2d_with_fsdp
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
TP_DEGREE = 2
LR = 3e-5
class Test2dParallelIntegration(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
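A quick usage sketch for the name clean-up above. It assumes FSDP_WRAPPED_MODULE is the "_fsdp_wrapped_module" infix that FSDP inserts into parameter names (the actual constant lives in torch.distributed.fsdp._common_utils); under that assumption the helper reduces a wrapped name back to the original one:
# Assumed value of the constant; see the note above.
FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module"

def clean_up_fsdp_param_name(name):
    # Drop every path component that is the FSDP wrapper infix.
    return ".".join(p for p in name.split(".") if p != FSDP_WRAPPED_MODULE)

print(clean_up_fsdp_param_name("net1._fsdp_wrapped_module.weight"))  # net1.weight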
||
torch
|
test/distributed/tensor/parallel/test_micro_pipeline_tp.py
|
_make_post_grad_fx
|
def _make_post_grad_fx(f, *inps):
gm = make_fx(f, decompositions, tracing_mode="fake")(*inps)
remove_noop_ops(gm.graph)
view_to_reshape(gm)
return gm
|
import unittest
from typing import Optional
import torch
import torch.distributed as dist
from functorch import make_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.fx_passes.micro_pipeline_tp import (
_get_unexposed_collectives,
find_all_gather_patterns,
find_reduce_scatter_patterns,
micro_pipeline_tp_pass,
)
from torch._inductor.fx_passes.post_grad import remove_noop_ops, view_to_reshape
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_tensor,
reduce_scatter_tensor,
)
from torch.distributed._symmetric_memory import _test_mode
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.distributed_c10d import _get_group_size_by_name
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
instantiate_parametrized_tests,
MI300_ARCH,
parametrize,
run_tests,
runOnRocmArch,
TestCase,
)
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
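A minimal usage sketch for _make_post_grad_fx, repeated here so it runs on its own: trace a small local function (no collectives, so no process group is needed) and print the cleaned-up graph. The traced function and sizes are illustrative.
import torch
from functorch import make_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.fx_passes.post_grad import remove_noop_ops, view_to_reshape

def _make_post_grad_fx(f, *inps):
    gm = make_fx(f, decompositions, tracing_mode="fake")(*inps)
    remove_noop_ops(gm.graph)
    view_to_reshape(gm)
    return gm

def f(x):
    return (x.view(-1) + 1.0).view(x.shape)

gm = _make_post_grad_fx(f, torch.rand(4, 4))
print(gm.graph)  # after view_to_reshape, the view calls show up as reshape nodes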
||
torch
|
test/distributed/tensor/parallel/test_micro_pipeline_tp.py
|
test_dtensor_seq_par
|
def test_dtensor_seq_par(self, shard_dim: int):
model = MLPModule(device="cuda", bias=False)
device_mesh = DeviceMesh(
"cuda",
torch.arange(0, self.world_size),
)
parallelize_plan = {
"net1": ColwiseParallel(input_layouts=Shard(shard_dim)),
"net2": RowwiseParallel(output_layouts=Shard(shard_dim)),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
if shard_dim == 0:
inp = torch.rand(8, 10, device="cuda")
elif shard_dim == 1:
inp = torch.rand(2, 8, 10, device="cuda")
else:
raise AssertionError("Invalid shard_dim")
with _test_mode():
compiled = torch.compile(model)
code = run_and_get_triton_code(compiled, inp)
self.assertIn("fused_all_gather_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
self.assertIn("fused_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
|
import unittest
from typing import Optional
import torch
import torch.distributed as dist
from functorch import make_fx
from torch._inductor.decomposition import decompositions
from torch._inductor.fx_passes.micro_pipeline_tp import (
_get_unexposed_collectives,
find_all_gather_patterns,
find_reduce_scatter_patterns,
micro_pipeline_tp_pass,
)
from torch._inductor.fx_passes.post_grad import remove_noop_ops, view_to_reshape
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_tensor,
reduce_scatter_tensor,
)
from torch.distributed._symmetric_memory import _test_mode
from torch.distributed._tensor import DeviceMesh
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.distributed_c10d import _get_group_size_by_name
from torch.distributed.tensor.parallel import (
ColwiseParallel,
parallelize_module,
RowwiseParallel,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
instantiate_parametrized_tests,
MI300_ARCH,
parametrize,
run_tests,
runOnRocmArch,
TestCase,
)
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
@instantiate_parametrized_tests
class MicroPipelineTPTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_parallelize_api.py
|
__init__
|
def __init__(self, device):
super().__init__()
torch.manual_seed(5)
self.net1 = torch.nn.Linear(10, 16, device=device)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(16, 12, device=device)
|
def __init__(self) -> None:
super().__init__()
|
from collections import OrderedDict
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate
from torch.distributed.tensor.parallel._utils import _create_1d_device_mesh
from torch.distributed.tensor.parallel.api import (
_parallelize_linear,
_parallelize_mlp,
parallelize_module,
)
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
make_input_replicate_1d,
make_output_replicate_1d,
PairwiseParallel,
ParallelStyle,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class MLPModule(torch.nn.Module):
|
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
PrepareModuleOutput,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
MLPStacked,
with_comms,
)
class DummyModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/tensor/parallel/test_parallelize_api.py
|
forward
|
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
|
def forward(self, x):
return x
|
from collections import OrderedDict
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate
from torch.distributed.tensor.parallel._utils import _create_1d_device_mesh
from torch.distributed.tensor.parallel.api import (
_parallelize_linear,
_parallelize_mlp,
parallelize_module,
)
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
make_input_replicate_1d,
make_output_replicate_1d,
PairwiseParallel,
ParallelStyle,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class MLPModule(torch.nn.Module):
|
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
PrepareModuleOutput,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
MLPStacked,
with_comms,
)
class DummyModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/tensor/parallel/test_parallelize_api.py
|
test_creat_1d_device_mesh_error
|
def test_creat_1d_device_mesh_error(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
with self.assertRaisesRegex(
AssertionError,
"Expect tp_mesh_dim within range \\[-1, 1\\), but found 3.",
):
_create_1d_device_mesh(mesh, 3)
|
def _compare_params(
self,
local_module,
dist_module,
rank0_only,
skip_rowwise_bias=False,
compare_grad=False,
):
replicate = [Replicate()]
for name, param in local_module.named_parameters():
dist_param = dist_module.get_parameter(name)
param = param.grad if compare_grad else param
dist_param = dist_param.grad if compare_grad else dist_param
if (
(not rank0_only)
or (self.rank == 0)
or (
name not in ["net2.bias"]
and not skip_rowwise_bias
or name not in ["bias", "net2.bias"]
)
):
self.assertEqual(
param,
dist_param.redistribute(
device_mesh=dist_param.device_mesh, placements=replicate
).to_local(),
f"{name} not equal between dist and non-dist",
)
def _compare_module(
self, local_module, dist_module, inp_size, rank0_only=True, rowwise=False
):
LR = 0.25 # the learning rate we use for testing
local_optim = torch.optim.SGD(local_module.parameters(), lr=LR)
dist_optim = torch.optim.SGD(dist_module.parameters(), lr=LR)
torch.manual_seed(0)
inp = torch.rand(*inp_size, device=self.device_type)
self._compare_params(local_module, dist_module, rank0_only)
# check forward correctness
local_output = local_module(inp)
inp = inp.chunk(self.world_size, dim=-1)[self.rank] if rowwise else inp
dist_output = dist_module(inp)
dist_output = (
dist_output.redistribute(dist_output.device_mesh, [Replicate()]).to_local()
if isinstance(dist_output, DTensor)
else dist_output
)
self.assertEqual(local_output, dist_output)
local_output.sum().backward()
dist_output.sum().backward()
# check backward and ensure gradients are same
self._compare_params(local_module, dist_module, rank0_only, rowwise, True)
local_optim.step()
dist_optim.step()
self._compare_params(local_module, dist_module, rank0_only, rowwise)
|
from collections import OrderedDict
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate
from torch.distributed.tensor.parallel._utils import _create_1d_device_mesh
from torch.distributed.tensor.parallel.api import (
_parallelize_linear,
_parallelize_mlp,
parallelize_module,
)
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
make_input_replicate_1d,
make_output_replicate_1d,
PairwiseParallel,
ParallelStyle,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class TensorParallelAPITests(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
__init__
|
def __init__(self, device):
super().__init__()
torch.manual_seed(5)
self.net1 = torch.nn.Linear(10, 16, device=device)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(16, 12, device=device)
|
def __init__(self) -> None:
super().__init__()
# Initialize different weights for embedding and fc.
torch.manual_seed(1)
self.embedding = torch.nn.Embedding(16, 8)
torch.manual_seed(2)
self.fc = torch.nn.Linear(8, 16)
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, Replicate
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
TensorParallelMultiheadAttention,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
NUM_DEVICES,
skip_unless_torch_gpu,
with_comms,
)
class MLPModule(torch.nn.Module):
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
from torch.distributed._tensor.experimental import implicit_replication
class TestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
forward
|
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
|
def forward(self, x):
return self.fc(self.embedding(x))
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, Replicate
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
TensorParallelMultiheadAttention,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
NUM_DEVICES,
skip_unless_torch_gpu,
with_comms,
)
class MLPModule(torch.nn.Module):
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
from torch.distributed._tensor.experimental import implicit_replication
class TestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
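The modified __init__ and forward in the two test_tp_examples records above assemble into a small embedding-plus-linear module. The surrounding context suggests the class is named TestModule; the name is used here only so the sketch is self-contained:
import torch

class TestModule(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Initialize different weights for embedding and fc.
        torch.manual_seed(1)
        self.embedding = torch.nn.Embedding(16, 8)
        torch.manual_seed(2)
        self.fc = torch.nn.Linear(8, 16)

    def forward(self, x):
        return self.fc(self.embedding(x))

# Token ids in [0, 16) of shape (4, 6) map to a (4, 6, 16) output.
tokens = torch.randint(0, 16, (4, 6))
print(TestModule()(tokens).shape)  # torch.Size([4, 6, 16])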
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
_check_module
|
def _check_module(self, m1, m2, check_grad=False):
named_parameters = dict(m1.named_parameters())
for name, param_m2 in m2.named_parameters():
self.assertTrue(name in named_parameters)
param_m1 = named_parameters[name]
if check_grad:
param_m2 = param_m2.grad
param_m1 = param_m1.grad
if isinstance(param_m2, DTensor):
replicate = [Replicate()]
param_m2 = param_m2.redistribute(
device_mesh=param_m2.device_mesh, placements=replicate
).to_local()
self.assertEqual(param_m2, param_m1)
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
class DistTensorParallelExampleTest(DTensorTestBase):
from torch.distributed._tensor.experimental import implicit_replication
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_parallelize_api.py
|
test_parallelize_module_multi_wildcard
|
if __name__ == "__main__":
run_tests()
|
def test_parallelize_module_multi_wildcard(self):
inp_size = [12, 10]
model = MLPStacked(self.device_type, n_layers=2)
device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
model_tp = deepcopy(model)
model_tp = parallelize_module(
model_tp,
device_mesh,
{
"layers.*.net[1]": ColwiseParallel(),
"layers.*.net[2]": RowwiseParallel(),
},
)
self._compare_module(model, model_tp, inp_size, rank0_only=False)
|
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
PrepareModuleOutput,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
MLPStacked,
with_comms,
)
class TensorParallelAPITests(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/tensor/parallel/test_tp_random_state.py
|
test_model_init
|
def test_model_init(self):
dp_size = 2
tp_size = self.world_size // dp_size
mesh_2d = init_device_mesh(
self.device_type, (dp_size, tp_size), mesh_dim_names=("dp", "tp")
)
dp_mesh = mesh_2d["dp"]
tp_mesh = mesh_2d["tp"]
dp_rank = dp_mesh.get_coordinate()[0]
tp_rank = tp_mesh.get_coordinate()[0]
self.assertEqual(dp_rank, self.rank // tp_size)
self.assertEqual(tp_rank, self.rank % tp_size)
for enable_distribute_flag in [False, True]:
# a local model on meta device
model = MLPModule(device="meta")
# the col-wise parallel style shards the weight over tensor dim 0
model_tp = parallelize_module(
model,
tp_mesh,
{
"net1": ColwiseParallel(output_layouts=Replicate()),
"net2": ColwiseParallel(output_layouts=Replicate()),
},
)
            # in most cases, the random number generator state is set by the data loader
# in the following way:
# - within a tensor parallel group, the RNG is set with the same seed
# - across data parallel groups, the RNG is set with different seeds
torch.cuda.manual_seed(dp_rank)
# disable/enable parallel RNG feature
random._rng_tracker.distribute_region_enabled = enable_distribute_flag
self.assertTrue(model_tp.net1.weight.is_meta)
# initialize the model's local shard
model_tp.to_empty(device=self.device_type)
model_tp.reset_parameters()
            # check that the weights were initialized in line with the DP/TP placements
for dtensor in [model_tp.net1.weight, model_tp.net2.weight]:
# check within the TP group
# the 1d mesh represents the TP group
_1d_mesh = dtensor.device_mesh
assert _1d_mesh.ndim == 1
self.assertEqual(_1d_mesh, tp_mesh)
tensor_local = dtensor.to_local()
# all-gather local shards
tensor_gather = funcol.all_gather_tensor(
tensor_local,
gather_dim=0,
group=_1d_mesh,
)
self.assertEqual(_1d_mesh.get_coordinate()[0], tp_rank)
# compare local shards within the TP group
def tp_weights_assert(tensor1, tensor2):
if enable_distribute_flag:
# each rank within a TP group shall initialize local weights differently
self.assertNotEqual(tensor1, tensor2)
else:
# without the parallel RNG, weight initialization violates the TP setup:
# each rank within a TP group has the same initial weights
self.assertEqual(tensor1, tensor2)
self.check_gathered_tensors(
tp_rank, tp_size, tensor_gather, tp_weights_assert
)
# check across TP groups
# all-gather local shards
tensor_gather = funcol.all_gather_tensor(
tensor_local,
gather_dim=0,
group=dp_mesh,
)
# compare local shards across TP groups
def dp_weights_assert(tensor1, tensor2):
if enable_distribute_flag:
# local weights shall be initialized the same across TP groups
self.assertEqual(tensor1, tensor2)
else:
# without the parallel RNG, weight initialization violates the TP setup:
# local weights are initialized differently across TP groups due to different
# random seeds set in data loading.
self.assertNotEqual(tensor1, tensor2)
self.check_gathered_tensors(
dp_rank, dp_size, tensor_gather, dp_weights_assert
)
|
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import init_device_mesh, Replicate
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
class TensorParallelRandomStateTests(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
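The test above checks that weights agree or differ across ranks depending on how the RNG was seeded. A tiny single-process illustration of the underlying property (plain torch, standing in for the DP/TP seeding logic):
import torch

def init_weight(seed):
    torch.manual_seed(seed)
    return torch.empty(4, 4).uniform_(-1, 1)

same_a, same_b = init_weight(0), init_weight(0)
diff = init_weight(1)
print(torch.equal(same_a, same_b))  # True:  same seed      -> identical local weights
print(torch.equal(same_a, diff))    # False: different seed -> different local weights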
||
torch
|
test/distributed/tensor/parallel/test_tp_random_state.py
|
tp_weights_assert
|
def tp_weights_assert(tensor1, tensor2):
if enable_distribute_flag:
# each rank within a TP group shall initialize local weights differently
self.assertNotEqual(tensor1, tensor2)
else:
# without the parallel RNG, weight initialization violates the TP setup:
# each rank within a TP group has the same initial weights
self.assertEqual(tensor1, tensor2)
self.check_gathered_tensors(
tp_rank, tp_size, tensor_gather, tp_weights_assert
)
# check across TP groups
# all-gather local shards
tensor_gather = funcol.all_gather_tensor(
tensor_local,
gather_dim=0,
group=dp_mesh,
)
# compare local shards across TP groups
|
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import init_device_mesh, Replicate
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_tp_random_state.py
|
dp_weights_assert
|
def dp_weights_assert(tensor1, tensor2):
if enable_distribute_flag:
# local weights shall be initialized the same across TP groups
self.assertEqual(tensor1, tensor2)
else:
# without the parallel RNG, weight initialization violates the TP setup:
# local weights are initialized differently across TP groups due to different
# random seeds set in data loading.
self.assertNotEqual(tensor1, tensor2)
self.check_gathered_tensors(
dp_rank, dp_size, tensor_gather, dp_weights_assert
)
|
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import init_device_mesh, Replicate
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/tensor/parallel/test_tp_style.py
|
test_make_input_shard_1d
|
def test_make_input_shard_1d(self):
tensor = torch.rand(8, 16, device=self.device_type)
self._1d_input_func_check(tensor, tensor, make_input_shard_1d)
# Common logic for testing prepare output funcs
|
import torch
from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
make_input_replicate_1d,
make_input_shard_1d,
make_output_replicate_1d,
make_output_shard_1d,
make_output_tensor,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class TensorParallelStyleTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
_test_default_store_timeout
|
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
|
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError(f"Unexpected type {type(c2p[0])}")
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
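The timeout test above hands results from a daemon thread back to the parent through a shared list; a minimal, self-contained sketch of that child-to-parent reporting pattern (the names and the sleep are illustrative stand-ins, not taken from the test suite):

import threading
import time

def worker(c2p):
    # Report either a timing result or the exception itself to the parent.
    try:
        tik = time.time()
        time.sleep(0.1)  # stand-in for the blocking store.get() call
        c2p.append(time.time() - tik)
    except RuntimeError as e:
        c2p.append(e)

c2p = []
t = threading.Thread(target=worker, args=(c2p,))
t.daemon = True
t.start()
t.join(5)
assert len(c2p) == 1
if isinstance(c2p[0], Exception):
    raise c2p[0]  # let the parent surface the error
print(f"worker finished in {c2p[0]:.3f}s")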
torch
|
test/distributed/test_c10d_common.py
|
test_store_based_barrier
|
def test_store_based_barrier(self):
f = tempfile.NamedTemporaryFile(delete=False)
port = common.find_free_port()
def thread_work(timeout, init_type, world_size, rank, error_list):
# we need to create a separate store just for the store barrier test
if init_type == "file":
barrier_store = dist.FileStore(f.name)
elif init_type == "tcp":
barrier_store = dist.TCPStore(
"localhost",
port,
world_size,
is_master=rank == 0,
wait_for_workers=False,
)
elif init_type == "hash":
barrier_store = dist.HashStore()
try:
# 1 missing worker will cause it to timeout
if rank != world_size - 1:
c10d._store_based_barrier(
rank=rank,
store=barrier_store,
group_name="_",
rendezvous_count=world_size,
timeout=timeout,
logging_interval=timeout / 2,
)
except torch.distributed.DistStoreError as e:
self.assertTrue(isinstance(e, torch.distributed.DistError))
error_list.append(e)
world_size = 4
error_list = []
threads = []
for init_type in ["file", "tcp", "hash"]:
for rank in range(world_size):
t = threading.Thread(
target=thread_work,
args=(
timedelta(seconds=3),
init_type,
world_size,
rank,
error_list,
),
)
threads.append(t)
t.start()
for i, thread in enumerate(threads):
thread.join()
# we expect the world_size-1 threads to have failed
self.assertEqual(len(error_list), world_size - 1)
for error in error_list:
self.assertTrue(
"Timed out initializing process group in store based barrier"
in error.args[0]
)
error_list = []
threads = []
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class TimeoutTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
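The barrier test above exercises three c10d store backends (FileStore, TCPStore, HashStore); a minimal single-process round trip with the simplest of them, shown only for orientation and not part of the test file:

import torch.distributed as dist

store = dist.HashStore()          # in-memory key/value store, single process
store.set("step", "1")
assert store.get("step") == b"1"  # values come back as bytes
store.add("counter", 5)           # atomic add, returns the new value
print(store.add("counter", 5))    # prints 10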
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_functional_native.py
|
test_threading
|
def test_threading(self):
self._init_process_group()
device = torch.device(f"cuda:{self.rank}")
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
ar0 = funcol.all_reduce(buf0, "avg", "0")
ar0 = funcol.wait_tensor(ar0)
return ar0 + 1
arg = torch.rand(4, 4, device=device)
func(arg)
compiled = torch.compile(func, fullgraph=True)
code = run_and_get_triton_code(compiled, arg)
FileCheck().check("all_reduce_.default(buf0, 'avg', '0')").run(code)
# Unless explicitly specified (e.g. in a custom runtime), the process
# group registry is shared among all threads in a process. Here we
# verify that a process group registered in main thread can be resolved
# in a different thread.
class TestThread(threading.Thread):
def run(self):
self.exc = None
try:
func(arg)
compiled(arg)
except BaseException as exc:
self.exc = exc
def join(self):
threading.Thread.join(self)
if self.exc:
raise self.exc
t = TestThread()
t.start()
t.join()
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
@requires_nccl()
class TestWithNCCL(MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_functional_native.py
|
run
|
def run(self):
self.exc = None
try:
func(arg)
compiled(arg)
except BaseException as exc:
self.exc = exc
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
class TestThread(threading.Thread):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_functional_native.py
|
join
|
def join(self):
threading.Thread.join(self)
if self.exc:
raise self.exc
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
class TestThread(threading.Thread):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
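The run/join pair in the two records above is a common pattern for surfacing a child thread's exception in the caller; a standalone sketch of the same idea, independent of the distributed test:

import threading

class PropagatingThread(threading.Thread):
    """Thread that re-raises any exception from run() when join() is called."""
    def run(self):
        self.exc = None
        try:
            super().run()
        except BaseException as exc:  # deliberately broad, mirroring the test
            self.exc = exc

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc:
            raise self.exc

t = PropagatingThread(target=lambda: 1 / 0)
t.start()
try:
    t.join()
except ZeroDivisionError:
    print("child thread error re-raised in the main thread")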
torch
|
test/distributed/tensor/parallel/test_tp_style.py
|
test_make_output_tensor
|
def test_make_output_tensor(self):
# test when output is sharded.
output, dtensor, device_mesh = self._test_prepare_output(
make_output_tensor, [Shard(0)]
)
self.assertEqual(
output, dtensor.redistribute(device_mesh, [Replicate()]).to_local()
)
# test when output is replicated.
output, dtensor, device_mesh = self._test_prepare_output(
make_output_tensor, [Replicate()]
)
self.assertEqual(
output, dtensor.redistribute(device_mesh, [Replicate()]).to_local()
)
# test when input device_mesh is None.
output, dtensor, device_mesh = self._test_prepare_output(
make_output_tensor, [Shard(0)], None, True
)
self.assertEqual(
output, dtensor.redistribute(device_mesh, [Replicate()]).to_local()
)
# Common logic for testing prepare output funcs errors.
|
import torch
from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
make_input_replicate_1d,
make_input_shard_1d,
make_output_replicate_1d,
make_output_shard_1d,
make_output_tensor,
RowwiseParallel,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class TensorParallelStyleTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/tensor/parallel/test_tp_style.py
|
test_sequence_parallel_style
|
if __name__ == "__main__":
run_tests()
|
def test_sequence_parallel_style(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
batch, N, embedding_dim = 20, 8, 12
global_input = torch.rand(
batch,
N * self.world_size,
embedding_dim,
device=self.device_type,
requires_grad=True,
)
sharded_input = distribute_tensor(global_input, mesh, [Shard(1)])
# test LayerNorm
for elementwise_affine in [True, False]:
norm = nn.LayerNorm(
embedding_dim,
elementwise_affine=elementwise_affine,
device=self.device_type,
)
sp_norm = parallelize_module(deepcopy(norm), mesh, SequenceParallel())
output = norm(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_norm(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0
)
if elementwise_affine:
self.assertEqual(sp_norm.weight.grad.placements, (_Partial(),))
self.assertEqual(sp_norm.bias.grad.placements, (_Partial(),))
self.assertEqual(sharded_out.full_tensor(), output)
# test RMSNorm
rmsnorm = RMSNormPython(embedding_dim).to(self.device_type)
sp_rmsnorm = parallelize_module(deepcopy(rmsnorm), mesh, SequenceParallel())
output = rmsnorm(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_rmsnorm(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(sp_rmsnorm.weight.grad.placements, (_Partial(),))
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0)
self.assertEqual(sharded_out.full_tensor(), output)
# test dropout
dropout = nn.Dropout(0.5).to(self.device_type)
sp_dropout = parallelize_module(deepcopy(dropout), mesh, SequenceParallel())
output = dropout(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_dropout(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
# test sharded on non-sequence dim input
sharded_batch_input = distribute_tensor(global_input, mesh, [Shard(0)])
rmsnorm = RMSNormPython(embedding_dim).to(self.device_type)
sp_rmsnorm = parallelize_module(deepcopy(rmsnorm), mesh, SequenceParallel())
with comm_mode:
sharded_out = sp_rmsnorm(sharded_batch_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
# output still sharded on sequence dimension
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(sp_rmsnorm.weight.grad.placements, (_Partial(),))
# communication happens in both fwd/bwd to redistribute input
self.assertEqual(comm_mode.get_total_counts(), 2)
|
from copy import deepcopy
import torch
import torch.nn as nn
from torch.distributed._tensor import (
distribute_tensor,
DTensor,
init_device_mesh,
Replicate,
Shard,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import parallelize_module
from torch.distributed.tensor.parallel.style import (
ColwiseParallel,
PrepareModuleInput,
PrepareModuleOutput,
RowwiseParallel,
SequenceParallel,
)
from torch.distributed.tensor.placement_types import _Partial
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
NUM_DEVICES,
RMSNormPython,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
class TensorParallelStyleTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
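The SequenceParallel test above compares against an RMSNormPython reference; a common formulation of RMSNorm in plain PyTorch is sketched below as an assumption about what that helper computes, for orientation only:

import torch
import torch.nn as nn

class SimpleRMSNorm(nn.Module):
    # x * rsqrt(mean(x^2) + eps), scaled by a learnable weight
    def __init__(self, dim, eps=1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return x * rms * self.weight

norm = SimpleRMSNorm(12)
out = norm(torch.rand(20, 8, 12))
print(out.shape)  # torch.Size([20, 8, 12])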
torch
|
test/distributed/tensor/parallel/test_view_sharding_dim_change.py
|
test_view_with_sharding_dim_change
|
def test_view_with_sharding_dim_change(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
torch.manual_seed(self.rank)
tensor = torch.rand(3, 5, 6, device=self.device_type)
sharding = [Shard(2)]
dt = DTensor.from_local(tensor, device_mesh, sharding)
dt = _view_with_sharding_dim_change(dt, 1, (3, -1, 6))
self.assertTrue(dt.placements[0].is_shard(dim=1))
self.assertEqual(dt.to_local(), tensor.view(3, -1, 6))
|
import torch
from torch.distributed._tensor import DeviceMesh, DTensor, Shard
from torch.distributed.tensor.parallel._view_with_dim_change import (
_view_with_sharding_dim_change,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class TPViewShardingDimChangeTest(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
_test_store_timeout
|
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
|
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "(?i)timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
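The only change in the record above is the regex: "(?i)timeout" matches the error message case-insensitively. A tiny standalone check of that behaviour (messages invented for illustration):

import re
import unittest

class RegexDemo(unittest.TestCase):
    def test_case_insensitive_match(self):
        with self.assertRaisesRegex(RuntimeError, "(?i)timeout"):
            raise RuntimeError("Socket Timeout after 1000 ms")
        # the inline flag is equivalent to re.search with re.IGNORECASE
        self.assertTrue(re.search("timeout", "TIMEOUT", re.IGNORECASE))

unittest.main(argv=["demo"], exit=False)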
torch
|
test/distributed/test_c10d_common.py
|
_init_methods
|
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
|
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///{}".format(f.name.replace("\\", "/"))
f.close()
else:
yield f"file://{f.name}"
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractTimeoutTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
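The helper above yields rendezvous URLs for file:// and tcp:// init methods; a sketch of the same idea with a socket-based free-port lookup standing in for common.find_free_port (that substitution is an assumption, not the test's code):

import socket
import sys
import tempfile

def init_methods():
    f = tempfile.NamedTemporaryFile(delete=False)
    if sys.platform == "win32":
        yield "file:///{}".format(f.name.replace("\\", "/"))
    else:
        yield f"file://{f.name}"
    f.close()
    with socket.socket() as s:  # ask the OS for any free TCP port
        s.bind(("127.0.0.1", 0))
        port = s.getsockname()[1]
    yield f"tcp://127.0.0.1:{port}"

print(list(init_methods()))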
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
_setup_optimizer
|
self.assertEqual(output, output_tp)
|
def _setup_optimizer(self, model, model_tp):
# Step 3: Run test by comparing outputs from single-gpu and multi-gpu models.
LR = 0.25
optim = torch.optim.Adam(model.parameters(), lr=LR)
optim_tp = torch.optim.Adam(model_tp.parameters(), lr=LR)
return optim, optim_tp
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
class DistTensorParallelExampleTest(DTensorTestBase):
from torch.distributed._tensor.experimental import implicit_replication
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
forward
|
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
|
def forward(self, x):
return self.fc(self.embedding(x))
|
import torch
import torch.nn as nn
from torch.distributed._tensor import DeviceMesh, Replicate
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
TensorParallelMultiheadAttention,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
NUM_DEVICES,
skip_unless_torch_gpu,
with_comms,
)
class MLPModule(torch.nn.Module):
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
from torch.distributed._tensor.experimental import implicit_replication
class TestModule(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/tensor/parallel/test_tp_examples.py
|
test_loss_parallel
|
if __name__ == "__main__":
run_tests()
|
def test_loss_parallel(self):
device_mesh = self.build_device_mesh()
comm_mode = CommDebugMode()
channel_size, channel_dim = 16, 1
test_setup = [
(2, (8, channel_size), (8,)), # calling aten.nll_loss_forward
(3, (8, channel_size, 12), (8, 12)), # calling aten.nll_loss2d_forward
]
weight = torch.rand(channel_size, device=self.device_type)
for input_ndim, input_size, target_size in test_setup:
x = torch.rand(*input_size, device=self.device_type, requires_grad=True)
target = torch.randint(channel_size, target_size, device=self.device_type)
shard_dims = list(range(input_ndim))
reductions = ["none", "mean", "sum"]
for shard_dim, reduction in itertools.product(shard_dims, reductions):
dist_x = distribute_tensor(x, device_mesh, [Shard(shard_dim)])
y = F.cross_entropy(x, target, weight, reduction=reduction)
with loss_parallel():
if shard_dim == channel_dim:
with comm_mode:
dist_y = F.cross_entropy(
dist_x, target, weight, reduction=reduction
)
self.assertEqual(comm_mode.get_total_counts(), 3)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_reduce],
3,
)
self.assertTrue(dist_y.placements[0].is_replicate())
self.assertEqual(dist_y.to_local(), y)
with comm_mode:
if reduction == "none":
y.sum().backward()
dist_y.sum().backward()
else:
y.backward()
dist_y.backward()
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertTrue(
dist_x.grad.placements[0].is_shard(shard_dim)
)
self.assertEqual(dist_x.grad.full_tensor(), x.grad)
x.grad.zero_()
else:
with self.assertRaisesRegex(
ValueError,
"loss_parallel",
):
dist_y = F.cross_entropy(
dist_x, target, reduction=reduction
)
|
import itertools
from copy import deepcopy
from typing import Dict, NamedTuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._tensor import (
DeviceMesh,
distribute_tensor,
DTensor,
Replicate,
Shard,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.tensor.debug import CommDebugMode
from torch.distributed.tensor.parallel import (
ColwiseParallel,
loss_parallel,
parallelize_module,
RowwiseParallel,
)
from torch.distributed.tensor.parallel.input_reshard import input_reshard
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
ModelArgs,
NUM_DEVICES,
skip_unless_torch_gpu,
Transformer,
with_comms,
)
c10d_functional = torch.ops.c10d_functional
reduce_scatter, all_gather, all_reduce = (
c10d_functional.reduce_scatter_tensor,
c10d_functional.all_gather_into_tensor,
c10d_functional.all_reduce,
)
class DistTensorParallelExampleTest(DTensorTestBase):
from torch.distributed._tensor.experimental import implicit_replication
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
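The loss-parallel test above validates against ordinary single-process cross entropy; the baseline it compares to looks roughly like the following CPU-only sketch (not the distributed path):

import torch
import torch.nn.functional as F

channel_size = 16
x = torch.rand(8, channel_size, requires_grad=True)  # logits
target = torch.randint(channel_size, (8,))           # class indices
weight = torch.rand(channel_size)                     # per-class weights

for reduction in ("none", "mean", "sum"):
    y = F.cross_entropy(x, target, weight, reduction=reduction)
    (y.sum() if reduction == "none" else y).backward()
    print(reduction, y.shape, x.grad.shape)
    x.grad = None  # reset between reductions, as the test does with zero_()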
torch
|
test/distributed/tensor/parallel/test_tp_random_state.py
|
get_tensor_slice
|
def get_tensor_slice(self, idx, n, large_tensor):
shape = large_tensor.shape
assert shape[0] % n == 0
local_shape = [shape[0] // n, shape[1]]
slice_idx = [
slice(idx * local_shape[0], (idx + 1) * local_shape[0]),
slice(local_shape[1]),
]
return large_tensor[slice_idx]
|
import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.tensor._random as random
from torch.distributed._tensor import init_device_mesh, Replicate
from torch.distributed.tensor.parallel.api import parallelize_module
from torch.distributed.tensor.parallel.style import ColwiseParallel
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
MLPModule,
with_comms,
)
class TensorParallelRandomStateTests(DTensorTestBase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
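get_tensor_slice above picks the idx-th row block of a tensor whose first dimension divides evenly across n ranks; a small single-process sketch of the same indexing (names are illustrative):

import torch

def row_block(idx, n, t):
    rows = t.shape[0] // n  # assumes t.shape[0] % n == 0
    return t[idx * rows:(idx + 1) * rows]

t = torch.arange(24).reshape(8, 3)
assert torch.equal(row_block(2, 4, t), t.chunk(4, dim=0)[2])
print(row_block(2, 4, t))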
torch
|
test/distributed/test_c10d_functional_native.py
|
_tolist_with_constrain_as_size
|
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
for elem in lst:
torch._check_is_size(elem)
return lst
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_functional_native.py
|
test_ranks_and_tag
|
def test_ranks_and_tag(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
# Expect in-place with inductor allocated buf
ar0 = funcol.all_reduce(buf0, "avg", [0, 1], "")
ar0 = funcol.wait_tensor(ar0)
# Expect no in-place with graph input
ar1 = funcol.all_reduce(arg, "avg", [0, 1], "")
ar1 = funcol.wait_tensor(ar1)
return ar0, ar1
arg = torch.rand(4, 4, device="cuda")
compiled = torch.compile(func, fullgraph=True)
code = run_and_get_triton_code(compiled, arg)
(FileCheck().check("all_reduce_.default(buf0, 'avg', '0')").run(code))
|
import threading
import unittest
from typing import List
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol
from torch._C import FileCheck
from torch._inductor.utils import fresh_inductor_cache, run_and_get_triton_code
from torch.distributed._functional_collectives import (
all_gather_into_tensor_coalesced,
all_gather_tensor,
all_reduce,
all_reduce_coalesced,
all_to_all_single,
AsyncCollectiveTensor,
reduce_scatter_tensor,
reduce_scatter_tensor_coalesced,
)
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
run_tests,
TestCase,
)
from torch.testing._internal.distributed.fake_pg import FakeStore
from torch.utils._triton import has_triton
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path
from unittest import mock
AOTIRunnerUtil = load_test_module("inductor.test_aot_inductor_utils").AOTIRunnerUtil
import sys
class CompileTest(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
test_ddp_checkpointing_twice_weight_sharing
|
def test_ddp_checkpointing_twice_weight_sharing(self):
"""
Checkpointing should work with static graph in the case of checkpointing
same layer twice and having weights shared acrosss layers.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModuleWeightSharing(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
|
def test_ddp_checkpointing_twice_weight_sharing(self):
"""
Checkpointing should work with static graph in the case of checkpointing
same layer twice and having weights shared across layers.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModuleWeightSharing(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class CommonDistributedDataParallelTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class CommonDistributedDataParallelTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
_test_sequence_num_incremented
|
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
|
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = dict(obj_list)
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
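The behavioural point of the diff above is that dict() over the gathered (rank, seq_num) pairs is equivalent to the old comprehension; a one-line check:

obj_list = [(0, 5), (1, 6), (2, 5), (3, 6)]  # (rank, seq_num) pairs
assert dict(obj_list) == {rank: num for (rank, num) in obj_list}
print(dict(obj_list))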
torch
|
test/distributed/test_c10d_common.py
|
_test_rank_membership
|
def _test_rank_membership(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self.assertTrue(self.world_size > 1)
group = dist.new_group(ranks=[1])
self.assertEqual(dist.get_group_rank(group, 1), 0)
with self.assertRaisesRegex(RuntimeError, "not part of group"):
dist.get_group_rank(group, 0)
with self.assertRaisesRegex(RuntimeError, "not registered"):
dist.get_group_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_global_rank(group, 0), 1)
with self.assertRaisesRegex(RuntimeError, "not part of group"):
dist.get_global_rank(group, 1)
with self.assertRaisesRegex(RuntimeError, "not registered"):
dist.get_global_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_process_group_ranks(group), [1])
|
def _test_rank_membership(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self.assertTrue(self.world_size > 1)
group = dist.new_group(ranks=[1])
self.assertEqual(dist.get_group_rank(group, 1), 0)
with self.assertRaisesRegex(ValueError, "not part of group"):
dist.get_group_rank(group, 0)
with self.assertRaisesRegex(ValueError, "not registered"):
dist.get_group_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_global_rank(group, 0), 1)
with self.assertRaisesRegex(ValueError, "not part of group"):
dist.get_global_rank(group, 1)
with self.assertRaisesRegex(ValueError, "not registered"):
dist.get_global_rank(DummyProcessGroup(self.rank, self.world_size), 0)
self.assertEqual(dist.get_process_group_ranks(group), [1])
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
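The record above switches the expected exception from RuntimeError to ValueError; assertRaisesRegex only passes when the raised type matches (or subclasses) the expected one, as this standalone sketch shows (messages invented for illustration):

import unittest

class ExpectedTypeDemo(unittest.TestCase):
    def test_expected_type_must_match(self):
        # Passes: a ValueError is raised and ValueError is expected.
        with self.assertRaisesRegex(ValueError, "not part of group"):
            raise ValueError("rank 0 is not part of group")
        # A ValueError would not satisfy an expectation of RuntimeError,
        # since ValueError does not subclass RuntimeError.
        self.assertFalse(issubclass(ValueError, RuntimeError))

unittest.main(argv=["demo"], exit=False)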
torch
|
test/distributed/test_c10d_common.py
|
test_debug_level
|
def test_debug_level(self):
try:
del os.environ["TORCH_DISTRIBUTED_DEBUG"]
except KeyError:
pass
dist.set_debug_level_from_env()
# Default should be off
default_debug_mode = dist.get_debug_level()
self.assertEqual(default_debug_mode, dist.DebugLevel.OFF)
mapping = {
"OFF": dist.DebugLevel.OFF,
"off": dist.DebugLevel.OFF,
"oFf": dist.DebugLevel.OFF,
"INFO": dist.DebugLevel.INFO,
"info": dist.DebugLevel.INFO,
"INfO": dist.DebugLevel.INFO,
"DETAIL": dist.DebugLevel.DETAIL,
"detail": dist.DebugLevel.DETAIL,
"DeTaIl": dist.DebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
dist.set_debug_level_from_env()
set_debug_mode = dist.get_debug_level()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
dist.set_debug_level_from_env()
|
def test_debug_level(self):
try:
del os.environ["TORCH_DISTRIBUTED_DEBUG"]
except KeyError:
pass
dist.set_debug_level_from_env()
# Default should be off
default_debug_mode = dist.get_debug_level()
self.assertEqual(default_debug_mode, dist.DebugLevel.OFF)
mapping = {
"OFF": dist.DebugLevel.OFF,
"off": dist.DebugLevel.OFF,
"oFf": dist.DebugLevel.OFF,
"INFO": dist.DebugLevel.INFO,
"info": dist.DebugLevel.INFO,
"INfO": dist.DebugLevel.INFO,
"DETAIL": dist.DebugLevel.DETAIL,
"detail": dist.DebugLevel.DETAIL,
"DeTaIl": dist.DebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
dist.set_debug_level_from_env()
set_debug_mode = dist.get_debug_level()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(
ValueError, "The value of TORCH_DISTRIBUTED_DEBUG must"
):
dist.set_debug_level_from_env()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class CommTest(AbstractCommTest, MultiProcessTestCase):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class CommTest(AbstractCommTest, MultiProcessTestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
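A quick sketch of the behaviour the test_debug_level change above documents: TORCH_DISTRIBUTED_DEBUG is parsed case-insensitively, and per the diff an invalid value is rejected with ValueError in the newer revision (RuntimeError in the older one). The snippet below is illustrative, not taken from the dataset.

import os
import torch.distributed as dist

os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DeTaIl"   # parsing is case-insensitive
dist.set_debug_level_from_env()
assert dist.get_debug_level() == dist.DebugLevel.DETAIL

os.environ["TORCH_DISTRIBUTED_DEBUG"] = "foo"      # not a recognized level
try:
    dist.set_debug_level_from_env()
except ValueError as exc:   # the older revision raised RuntimeError here
    print("rejected:", exc)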
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
comm_fn
|
def comm_fn(tensor, group=None):
work = dist.all_reduce(tensor, group=group, async_op=True)
return work, tensor
self._test_work_wait(tensor, comm_fn=comm_fn)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/test_c10d_common.py
|
_test_tensor_dtype_mismatch
|
def _test_tensor_dtype_mismatch(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
tensor = torch.ones(2, 2, device=self.device) * 7
tensor_h = tensor.half()
tensor_list = [torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)]
tensor_list_h = list(tensor_list)
tensor_list_h[1] = tensor_list_h[1].half()
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_gather(tensor_list_h, tensor)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_gather(tensor_list, tensor_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_gather_coalesced([tensor_list_h], tensor_list)
dist.all_gather_coalesced([tensor_list], tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_reduce_coalesced(tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.reduce_scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.reduce_scatter(tensor_h, tensor_list)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_to_all_single(tensor_h, tensor)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_to_all(tensor_list_h, tensor_list)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.all_to_all(tensor_list, tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.gather(tensor_h, tensor_list)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.gather(tensor, tensor_list_h)
with self.assertRaisesRegex(RuntimeError, "tensors with different dtypes"):
dist.scatter(tensor_h, tensor_list)
|
def _test_tensor_dtype_mismatch(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
tensor = torch.ones(2, 2, device=self.device) * 7
tensor_h = tensor.half()
tensor_list = [
torch.zeros(2, 2, device=self.device) for _ in range(self.world_size)
]
tensor_list_h = list(tensor_list)
tensor_list_h[1] = tensor_list_h[1].half()
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list_h, tensor)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather(tensor_list, tensor_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_gather_coalesced([tensor_list_h], tensor_list)
dist.all_gather_coalesced([tensor_list], tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_reduce_coalesced(tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.reduce_scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.reduce_scatter(tensor_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all_single(tensor_h, tensor)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all(tensor_list_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.all_to_all(tensor_list, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.scatter(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.gather(tensor_h, tensor_list)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.gather(tensor, tensor_list_h)
with self.assertRaisesRegex(ValueError, "tensors with different dtypes"):
dist.scatter(tensor_h, tensor_list)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
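The _test_tensor_dtype_mismatch change above swaps the expected error type from RuntimeError to ValueError when a collective receives tensors of mixed dtypes. A hedged single-process sketch of that check follows; the gloo/FileStore setup is an assumption so the snippet runs standalone.

import tempfile
import torch
import torch.distributed as dist

store = dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)
dist.init_process_group("gloo", store=store, rank=0, world_size=1)

tensor = torch.ones(2, 2)
outputs = [torch.zeros(2, 2, dtype=torch.half)]  # dtype differs from the input
try:
    dist.all_gather(outputs, tensor)
except ValueError as exc:   # RuntimeError before the change recorded above
    print("mixed dtypes rejected:", exc)

dist.destroy_process_group()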
torch
|
test/distributed/test_c10d_common.py
|
_test_bool_tensors
|
def _test_bool_tensors(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
device = "cuda" if backend == "nccl" else "cpu"
# test alltoall_base
tensor = torch.tensor([1, 0, 0, 1], dtype=torch.bool, device=device)
zeros = torch.tensor([0, 0, 0, 0], dtype=torch.bool, device=device)
outensor = zeros if self.rank > 0 else tensor
dist.broadcast(outensor, src=0)
self.assertEqual(outensor, tensor)
# Variant of AbstractCommTest that expects world size of 4
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
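_test_bool_tensors, added above, checks that boolean tensors are accepted by broadcast. A minimal single-process illustration, with the gloo/FileStore setup again assumed only so it runs standalone:

import tempfile
import torch
import torch.distributed as dist

store = dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)
dist.init_process_group("gloo", store=store, rank=0, world_size=1)

flags = torch.tensor([True, False, False, True])
dist.broadcast(flags, src=0)   # bool tensors are valid collective inputs
print(flags)

dist.destroy_process_group()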
torch
|
test/distributed/test_c10d_common.py
|
_test_new_group_local_sync
|
def _test_new_group_local_sync(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
rank = dist.get_rank()
ranks_in = [rank, (rank + 2) % self.world_size]
ranks_out = [i for i in range(self.world_size) if i not in ranks_in]
self.assertIn(rank, ranks_in)
self.assertNotIn(rank, ranks_out)
self.assertIsNone(
dist.new_group(ranks=ranks_out, use_local_synchronization=True)
)
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
self.assertIsInstance(new_pg, dist.ProcessGroup)
# PTD sorts ranks before creating the PG, so [3, 1] actually gets assigned ranks [1, 0]
ranks_in.sort()
self.assertEqual(dist.get_group_rank(new_pg, rank), ranks_in.index(rank))
self.assertEqual(
ranks_in,
dist.get_process_group_ranks(new_pg),
f"expecting {ranks_in} but got {dist.get_process_group_ranks(new_pg)}",
)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractLargeCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
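The _test_new_group_local_sync test added above exercises dist.new_group(..., use_local_synchronization=True), where only the ranks listed in `ranks` participate in group creation and ranks outside it get None back. A single-process sketch under the same assumed gloo/FileStore setup:

import tempfile
import torch.distributed as dist

store = dist.FileStore(tempfile.NamedTemporaryFile(delete=False).name, 1)
dist.init_process_group("gloo", store=store, rank=0, world_size=1)

# Only members of `ranks` take part in creating the group.
pg = dist.new_group(ranks=[0], use_local_synchronization=True)
assert dist.get_process_group_ranks(pg) == [0]

dist.destroy_process_group()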
torch
|
test/distributed/test_c10d_common.py
|
_test_new_group_local_sync_sanity_check
|
def _test_new_group_local_sync_sanity_check(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
rank = dist.get_rank()
# split the world in 2 PGs
rank = dist.get_rank()
pg_idx = rank // 2
ranks_in = [pg_idx * 2, pg_idx * 2 + 1]
new_pg = dist.new_group(ranks=ranks_in, use_local_synchronization=True)
input_tensor = torch.tensor([pg_idx, rank], device=self.device)
output_tensor_list = [
torch.tensor(
[-1, -1],
device=self.device,
)
for _ in range(new_pg.size())
]
dist.all_gather(output_tensor_list, input_tensor, group=new_pg)
expected = [
torch.tensor([pg_idx, ranks_in[0]], device=self.device),
torch.tensor([pg_idx, ranks_in[1]], device=self.device),
]
self.assertEqual(output_tensor_list, expected)
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class AbstractLargeCommTest:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/distributed/test_c10d_common.py
|
__init__
|
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Callable, Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.distributed._spmd.comm_tensor import _wait_comm, CommTensor
from torch.fx.experimental.proxy_tensor import make_fx
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
import copy
import os
import pickle
import sys
import tempfile
import threading
import time
from contextlib import nullcontext
from dataclasses import dataclass
from datetime import timedelta
from itertools import product
from sys import platform
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.distributed.distributed_c10d as c10d
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
load_tests,
parametrize,
retry_on_connect_failures,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
TestCase,
)
from torch.utils.checkpoint import checkpoint
load_tests = load_tests
torch.backends.cuda.matmul.allow_tf32 = False
class Net(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|