library: stringclasses (1 value)
test_file: stringclasses (785 values)
test_function: stringlengths (1 to 295)
before: stringlengths (0 to 448k)
after: stringlengths (0 to 487k)
context_before: stringclasses (947 values)
context_after: stringlengths (0 to 16.3k)
commit_before: stringclasses (1 value)
commit_after: stringclasses (1 value)
change_type: stringclasses (3 values)
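Each row below pairs a test function's body before and after a change, along with its surrounding import context and the two commits involved. As a minimal sketch of how a row with this schema could be read and compared, assuming it is hosted as a Hugging Face dataset (the repository id, split name, and slicing below are illustrative placeholders, not confirmed by this dump):

# Minimal sketch (hypothetical repository id) of loading one row of this schema
# and diffing its "before" and "after" test bodies.
import difflib

from datasets import load_dataset  # Hugging Face `datasets` library

ds = load_dataset("example-org/pytorch-test-function-changes", split="train")  # placeholder id
row = ds[0]

print(row["library"], row["test_file"], row["test_function"], row["change_type"])

# "before" is empty for rows whose change_type is "added".
diff = difflib.unified_diff(
    row["before"].splitlines(keepends=True),
    row["after"].splitlines(keepends=True),
    fromfile=row["commit_before"],
    tofile=row["commit_after"],
)
print("".join(diff))

Since the before and after cells can run to hundreds of kilobytes (per the column statistics above), a line-level diff like this is usually more practical than printing whole cells.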
torch
test/distributed/elastic/multiprocessing/redirects_test.py
_redirect_large_buffer
def _redirect_large_buffer(self, print_fn, num_lines=500_000): stdout_log = os.path.join(self.test_dir, "stdout.log") with redirect_stdout(stdout_log): for i in range(num_lines): print_fn(i) with open(stdout_log) as fp: actual = {int(line.split(":")[1]) for line in fp.readlines()} expected = set(range(num_lines)) self.assertSetEqual(expected, actual)
def _redirect_large_buffer(self, print_fn, num_lines=500_000): stdout_log = os.path.join(self.test_dir, "stdout.log") with redirect_stdout(stdout_log): for i in range(num_lines): print_fn(i) with open(stdout_log) as fp: actual = {int(line.split(":")[1]) for line in fp} expected = set(range(num_lines)) self.assertSetEqual(expected, actual)
import ctypes import os import shutil import sys import tempfile import unittest from torch.distributed.elastic.multiprocessing.redirects import ( redirect, redirect_stderr, redirect_stdout, ) libc = ctypes.CDLL("libc.so.6") c_stderr = ctypes.c_void_p.in_dll(libc, "stderr") class RedirectsTest(unittest.TestCase):
import ctypes import os import shutil import sys import tempfile import unittest from torch.distributed.elastic.multiprocessing.redirects import ( redirect, redirect_stderr, redirect_stdout, ) libc = ctypes.CDLL("libc.so.6") c_stderr = ctypes.c_void_p.in_dll(libc, "stderr") class RedirectsTest(unittest.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/multiprocessing/tail_log_test.py
test_tail
def test_tail(self): """ writer() writes 0 - max (on number on each line) to a log file. Run nprocs such writers and tail the log files into an IOString and validate that all lines are accounted for. """ nprocs = 32 max = 1000 interval_sec = 0.0001 log_files = { local_rank: os.path.join(self.test_dir, f"{local_rank}_stdout.log") for local_rank in range(nprocs) } dst = io.StringIO() tail = TailLog("writer", log_files, dst, interval_sec).start() # sleep here is intentional to ensure that the log tail # can gracefully handle and wait for non-existent log files time.sleep(interval_sec * 10) futs = [] for local_rank, file in log_files.items(): f = self.threadpool.submit( write, max=max, sleep=interval_sec * local_rank, file=file ) futs.append(f) wait(futs, return_when=ALL_COMPLETED) self.assertFalse(tail.stopped()) tail.stop() dst.seek(0) actual: Dict[int, Set[int]] = {} for line in dst.readlines(): header, num = line.split(":") nums = actual.setdefault(header, set()) nums.add(int(num)) self.assertEqual(nprocs, len(actual)) self.assertEqual( {f"[writer{i}]": set(range(max)) for i in range(nprocs)}, actual ) self.assertTrue(tail.stopped())
def test_tail(self): """ writer() writes 0 - max (on number on each line) to a log file. Run nprocs such writers and tail the log files into an IOString and validate that all lines are accounted for. """ nprocs = 32 max = 1000 interval_sec = 0.0001 log_files = { local_rank: os.path.join(self.test_dir, f"{local_rank}_stdout.log") for local_rank in range(nprocs) } dst = io.StringIO() tail = TailLog( name="writer", log_files=log_files, dst=dst, interval_sec=interval_sec ).start() # sleep here is intentional to ensure that the log tail # can gracefully handle and wait for non-existent log files time.sleep(interval_sec * 10) futs = [] for local_rank, file in log_files.items(): f = self.threadpool.submit( write, max=max, sleep=interval_sec * local_rank, file=file ) futs.append(f) wait(futs, return_when=ALL_COMPLETED) self.assertFalse(tail.stopped()) tail.stop() dst.seek(0) actual: Dict[int, Set[int]] = {} for line in dst.readlines(): header, num = line.split(":") nums = actual.setdefault(header, set()) nums.add(int(num)) self.assertEqual(nprocs, len(actual)) self.assertEqual( {f"[writer{i}]": set(range(max)) for i in range(nprocs)}, actual ) self.assertTrue(tail.stopped())
import io import os import shutil import sys import tempfile import time import unittest from concurrent.futures import wait from concurrent.futures._base import ALL_COMPLETED from concurrent.futures.thread import ThreadPoolExecutor from typing import Dict, Set from unittest import mock from torch.distributed.elastic.multiprocessing.tail_log import TailLog class TailLogTest(unittest.TestCase):
import io import os import shutil import sys import tempfile import time import unittest from concurrent.futures import wait from concurrent.futures._base import ALL_COMPLETED from concurrent.futures.thread import ThreadPoolExecutor from typing import Dict, Set from unittest import mock from torch.distributed.elastic.multiprocessing.tail_log import TailLog class TailLogTest(unittest.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/multiprocessing/tail_log_test.py
test_tail_with_custom_prefix
def test_tail_with_custom_prefix(self): """ writer() writes 0 - max (on number on each line) to a log file. Run nprocs such writers and tail the log files into an IOString and validate that all lines are accounted for. """ nprocs = 3 max = 10 interval_sec = 0.0001 log_files = { local_rank: os.path.join(self.test_dir, f"{local_rank}_stdout.log") for local_rank in range(nprocs) } dst = io.StringIO() log_line_prefixes = {n: f"[worker{n}][{n}]:" for n in range(nprocs)} tail = TailLog( "writer", log_files, dst, interval_sec=interval_sec, log_line_prefixes=log_line_prefixes, ).start() # sleep here is intentional to ensure that the log tail # can gracefully handle and wait for non-existent log files time.sleep(interval_sec * 10) futs = [] for local_rank, file in log_files.items(): f = self.threadpool.submit( write, max=max, sleep=interval_sec * local_rank, file=file ) futs.append(f) wait(futs, return_when=ALL_COMPLETED) self.assertFalse(tail.stopped()) tail.stop() dst.seek(0) headers: Set[str] = set() for line in dst.readlines(): header, _ = line.split(":") headers.add(header) self.assertEqual(nprocs, len(headers)) for i in range(nprocs): self.assertIn(f"[worker{i}][{i}]", headers) self.assertTrue(tail.stopped())
import io import os import shutil import sys import tempfile import time import unittest from concurrent.futures import wait from concurrent.futures._base import ALL_COMPLETED from concurrent.futures.thread import ThreadPoolExecutor from typing import Dict, Set from unittest import mock from torch.distributed.elastic.multiprocessing.tail_log import TailLog class TailLogTest(unittest.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
test_share_store_creates_tcp_store
def test_share_store_creates_tcp_store(self): handler = self._create_handler() shared_store_info = RendezvousStoreInfo("host", 54321) with patch.object(RendezvousStoreInfo, "build", return_value=shared_store_info): rdzv_info = handler.next_rendezvous() self.assertEqual(rdzv_info.bootstrap_store_info.master_addr, "host") self.assertEqual(rdzv_info.bootstrap_store_info.master_port, 54321) self.assertEqual(handler._shared_tcp_store_server, self._tcp_store_mock) rdzv_info = handler.next_rendezvous() self.assertEqual(handler._shared_tcp_store_server, self._tcp_store_mock)
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, ) class DynamicRendezvousHandlerTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
_ignore_exception
def _ignore_exception(exception_type: Exception, fn: Callable): try: fn() except exception_type as e: pass
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
_wait_while
def _wait_while(): while True: if condition(): break else: time.sleep(interval) wait_thread = threading.Thread(target=_wait_while, name=name) wait_thread.start() wait_thread.join(timeout=timeout)
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
run
def run(self): if self._target is not None: self._result = self._target(*self._args, **self._kwargs)
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, ) class _CapturingThread(threading.Thread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
join
def join(self, *args): threading.Thread.join(self, *args) return self._result
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, ) class _CapturingThread(threading.Thread):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
test_share_tcp_store_is_disabled
def test_share_tcp_store_is_disabled(self, prefix_store_class_mock): prefix_store = Mock() prefix_store_class_mock.return_value = prefix_store prefix_store.set.return_value = None prefix_store.get.return_value = b"123" tcp_store = Mock(spec=dist.TCPStore) # this will be injected self._store = tcp_store handler1 = self._create_handler(min_nodes=2, max_nodes=2) handler2 = self._create_handler(min_nodes=2, max_nodes=2) handler1_thread = _CapturingThread(target=handler1.next_rendezvous) handler2_thread = _CapturingThread(target=handler2.next_rendezvous) handler1_thread.start() handler2_thread.start() rdzv_info1: RendezvousInfo = handler1_thread.join() rdzv_info2: RendezvousInfo = handler2_thread.join() self.assertEqual(rdzv_info1.store, prefix_store) self.assertEqual(rdzv_info2.store, prefix_store) prefix_store_class_mock.assert_called_with( "torch.rendezvous.dummy_run_id.0", self._store ) self.assertEqual(rdzv_info1.bootstrap_store_info.master_port, 123) self.assertEqual(rdzv_info2.bootstrap_store_info.master_port, 123)
import copy import os import pickle import socket import threading import time from abc import ABC, abstractmethod from base64 import b64encode from datetime import datetime, timedelta from typing import Callable, cast, Optional, Tuple from unittest import TestCase from unittest.mock import call, MagicMock, Mock, patch, PropertyMock import torch.distributed as dist from torch.distributed import HashStore, Store from torch.distributed.elastic.rendezvous import ( RendezvousClosedError, RendezvousError, RendezvousInfo, RendezvousParameters, RendezvousStateError, RendezvousStoreInfo, RendezvousTimeoutError, ) from torch.distributed.elastic.rendezvous.dynamic_rendezvous import ( _Action, _BackendRendezvousStateHolder, _DistributedRendezvousOpExecutor, _NodeDesc, _NodeDescGenerator, _RendezvousCloseOp, _RendezvousContext, _RendezvousExitOp, _RendezvousJoinOp, _RendezvousKeepAliveOp, _RendezvousState, _RendezvousStateHolder, create_handler, DynamicRendezvousHandler, RendezvousBackend, RendezvousSettings, RendezvousTimeout, Token, ) class IntegrationTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/rendezvous/etcd_server_test.py
test_etcd_server_with_rendezvous
def test_etcd_server_with_rendezvous(self): server = EtcdServer() server.start() client = etcd.Client(server.get_host(), server.get_port()) rdzv = EtcdRendezvous( client=client, prefix="test", run_id=1, num_min_workers=1, num_max_workers=1, timeout=60, last_call_timeout=30, ) rdzv_handler = EtcdRendezvousHandler(rdzv) store, rank, world_size = rdzv_handler.next_rendezvous() self.assertIsNotNone(store) self.assertEqual(0, rank) self.assertEqual(1, world_size)
def test_etcd_server_with_rendezvous(self): server = EtcdServer() server.start() try: client = etcd.Client(server.get_host(), server.get_port()) rdzv = EtcdRendezvous( client=client, prefix="test", run_id=1, num_min_workers=1, num_max_workers=1, timeout=60, last_call_timeout=30, ) rdzv_handler = EtcdRendezvousHandler(rdzv) rdzv_info = rdzv_handler.next_rendezvous() self.assertIsNotNone(rdzv_info.store) self.assertEqual(0, rdzv_info.rank) self.assertEqual(1, rdzv_info.world_size) finally: server.stop()
import os import unittest import etcd from torch.distributed.elastic.rendezvous.etcd_rendezvous import ( EtcdRendezvous, EtcdRendezvousHandler, ) from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer class EtcdServerTest(unittest.TestCase):
import os import sys import unittest import etcd from torch.distributed.elastic.rendezvous.etcd_rendezvous import ( EtcdRendezvous, EtcdRendezvousHandler, ) from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer class EtcdServerTest(unittest.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/rendezvous/static_rendezvous_test.py
test_static_rdzv_multiple_calls
def test_static_rdzv_multiple_calls(self): sock = get_socket_with_port() with closing(sock): master_port = sock.getsockname()[1] master_addr = "localhost" rdzv_params = RendezvousParameters( backend="static", endpoint=f"{master_addr}:{master_port}", run_id="test_id", min_nodes=1, max_nodes=1, rank=0, ) rdzv_handler = create_rdzv_handler(rdzv_params) # Call rendezvous two times store, rank, world_size = rdzv_handler.next_rendezvous() self.assertIsNotNone(store) self.assertEqual(0, rank) self.assertEqual(1, world_size) store, rank, world_size = rdzv_handler.next_rendezvous() self.assertIsNotNone(store) self.assertEqual(0, rank) self.assertEqual(1, world_size)
def test_static_rdzv_multiple_calls(self): sock = get_socket_with_port() with closing(sock): master_port = sock.getsockname()[1] master_addr = "localhost" rdzv_params = RendezvousParameters( backend="static", endpoint=f"{master_addr}:{master_port}", run_id="test_id", min_nodes=1, max_nodes=1, rank=0, ) rdzv_handler = create_rdzv_handler(rdzv_params) # Call rendezvous two times rdzv_info = rdzv_handler.next_rendezvous() self.assertIsNotNone(rdzv_info.store) self.assertEqual(0, rdzv_info.rank) self.assertEqual(1, rdzv_info.world_size) rdzv_info = rdzv_handler.next_rendezvous() self.assertIsNotNone(rdzv_info.store) self.assertEqual(0, rdzv_info.rank) self.assertEqual(1, rdzv_info.world_size)
import unittest from contextlib import closing from torch.distributed.elastic.rendezvous import RendezvousParameters from torch.distributed.elastic.rendezvous.static_tcp_rendezvous import ( create_rdzv_handler, ) from torch.distributed.elastic.utils import get_socket_with_port class StaticTCPRendezvousTest(unittest.TestCase):
import unittest from contextlib import closing from torch.distributed.elastic.rendezvous import RendezvousParameters from torch.distributed.elastic.rendezvous.static_tcp_rendezvous import ( create_rdzv_handler, ) from torch.distributed.elastic.utils import get_socket_with_port class StaticTCPRendezvousTest(unittest.TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/test_control_plane.py
_new_conn
def _new_conn(self): return UnixHTTPConnection(self.socket_path)
import json import os import pickle import socket import tempfile from contextlib import contextmanager from typing import Dict from urllib3.connection import HTTPConnection from urllib3.connectionpool import HTTPConnectionPool from torch.distributed.elastic.control_plane import ( TORCH_WORKER_SERVER_SOCKET, worker_main, ) from torch.testing._internal.common_utils import requires_cuda, run_tests, TestCase class UnixHTTPConnectionPool(HTTPConnectionPool): import requests from torch._C._distributed_c10d import _WorkerServer from torch._C._distributed_c10d import _get_handler, _Request, _Response from torch._C._distributed_c10d import _get_handler from torch._C._distributed_c10d import _get_handler_names
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
func2
def func2(n, file_path): if file_path is not None: timer.configure(timer.FileTimerClient(file_path)) if n > 0: with timer.expires(after=0.1): func2(n - 1, None) time.sleep(0.2) class FileTimerTest(TestCase): def setUp(self): super().setUp() self.max_interval = 0.01 self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4()) self.server = timer.FileTimerServer(self.file_path, self.max_interval) self.server.start() def tearDown(self): super().tearDown() self.server.stop() def test_exception_propagation(self): with self.assertRaises(RuntimeError, msg="foobar"): with timer.expires(after=1): raise RuntimeError("foobar") def test_no_client(self): # no timer client configured; exception expected timer.configure(None) with self.assertRaises(RuntimeError): with timer.expires(after=1): pass def test_client_interaction(self): # no timer client configured but one passed in explicitly # no exception expected timer_client = timer.FileTimerClient(self.file_path) timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire) timer_client.release = mock.MagicMock(wraps=timer_client.release) with timer.expires(after=1, scope="test", client=timer_client): pass timer_client.acquire.assert_called_once_with("test", mock.ANY) timer_client.release.assert_called_once_with("test") def test_happy_path(self): timer.configure(timer.FileTimerClient(self.file_path)) with timer.expires(after=0.5): time.sleep(0.1) def test_get_timer_recursive(self): """ If a function acquires a countdown timer with default scope, then recursive calls to the function should re-acquire the timer rather than creating a new one. That is only the last recursive call's timer will take effect. """ timer.configure(timer.FileTimerClient(self.file_path)) # func should not time out def func(n): if n > 0: with timer.expires(after=0.1): func(n - 1) time.sleep(0.05) func(4) p = mp.Process(target=func2, args=(2, self.file_path)) p.start() p.join() self.assertEqual(-signal.SIGKILL, p.exitcode) def test_multiple_clients_interaction(self): # func should not time out def func(n, file_path): if file_path is not None: timer.configure(timer.FileTimerClient(file_path)) if n > 0: with timer.expires(after=100): func(n - 1, None) time.sleep(0.01) num_clients = 10 num_requests_per_client = 10 processes = [] for i in range(num_clients): p = mp.Process(target=func, args=(num_requests_per_client, self.file_path)) processes.append(p) p.start() for p in processes: p.join() self.server.run_once() # Allows the server to process all requests self.assertEqual(2 * num_clients * num_requests_per_client, self.server._request_count) @staticmethod def _run(file_path, timeout, duration): client = timer.FileTimerClient(file_path) timer.configure(client) with timer.expires(after=timeout): time.sleep(duration) @unittest.skipIf(TEST_WITH_TSAN, "test is tsan incompatible") def test_timer(self): timeout = 0.1 duration = 1 p = mp.Process(target=self._run, args=(self.file_path, timeout, duration)) p.start() p.join() self.assertEqual(-signal.SIGKILL, p.exitcode)
def func2(n, file_path): if file_path is not None: timer.configure(timer.FileTimerClient(file_path)) if n > 0: with timer.expires(after=0.1): func2(n - 1, None) time.sleep(0.2) class FileTimerTest(TestCase): def setUp(self): super().setUp() self.max_interval = 0.01 self.file_path = f"/tmp/test_file_path_{os.getpid()}_{uuid.uuid4()}" self.server = timer.FileTimerServer( self.file_path, "test", self.max_interval ) self.server.start() def tearDown(self): super().tearDown() self.server.stop() def test_exception_propagation(self): with self.assertRaises(RuntimeError, msg="foobar"): with timer.expires(after=1): raise RuntimeError("foobar") def test_no_client(self): # no timer client configured; exception expected timer.configure(None) with self.assertRaises(RuntimeError): with timer.expires(after=1): pass def test_client_interaction(self): # no timer client configured but one passed in explicitly # no exception expected timer_client = timer.FileTimerClient(self.file_path) timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire) timer_client.release = mock.MagicMock(wraps=timer_client.release) with timer.expires(after=1, scope="test", client=timer_client): pass timer_client.acquire.assert_called_once_with("test", mock.ANY) timer_client.release.assert_called_once_with("test") def test_happy_path(self): timer.configure(timer.FileTimerClient(self.file_path)) with timer.expires(after=0.5): time.sleep(0.1) def test_get_timer_recursive(self): """ If a function acquires a countdown timer with default scope, then recursive calls to the function should re-acquire the timer rather than creating a new one. That is only the last recursive call's timer will take effect. """ timer.configure(timer.FileTimerClient(self.file_path)) # func should not time out def func(n): if n > 0: with timer.expires(after=0.1): func(n - 1) time.sleep(0.05) func(4) p = mp.Process(target=func2, args=(2, self.file_path)) p.start() p.join() self.assertEqual(-signal.SIGKILL, p.exitcode) def test_multiple_clients_interaction(self): # func should not time out def func(n, file_path): if file_path is not None: timer.configure(timer.FileTimerClient(file_path)) if n > 0: with timer.expires(after=100): func(n - 1, None) time.sleep(0.01) num_clients = 10 num_requests_per_client = 10 processes = [] for i in range(num_clients): p = mp.Process( target=func, args=(num_requests_per_client, self.file_path) ) processes.append(p) p.start() for p in processes: p.join() self.server.run_once() # Allows the server to process all requests self.assertEqual( 2 * num_clients * num_requests_per_client, self.server._request_count ) @mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker") def test_exit_before_release(self, mock_reap): def func1(file_path): client = timer.FileTimerClient(file_path) timer.configure(client) expire = time.time() + 2 client.acquire("test_scope", expire) time.sleep(1) p = mp.Process(target=func1, args=(self.file_path,)) p.start() p.join() time.sleep(2) self.server.run_once() # Allows the server to process all requests mock_reap.assert_not_called() self.assertEqual(0, len(self.server._timers)) @mock.patch("torch.distributed.elastic.timer.FileTimerServer._reap_worker") @mock.patch( "torch.distributed.elastic.timer.FileTimerServer.is_process_running" ) def test_exit_before_release_reap(self, mock_pid_exists, mock_reap): def func1(file_path): client = timer.FileTimerClient(file_path) timer.configure(client) expire = time.time() + 2 client.acquire("test_scope", expire) time.sleep(1) mock_pid_exists.return_value = True p = mp.Process(target=func1, args=(self.file_path,)) p.start() p.join() time.sleep(2) self.server.run_once() # Allows the server to process all requests mock_reap.assert_called() self.assertEqual(0, len(self.server._timers)) @staticmethod def _run(file_path, timeout, duration): client = timer.FileTimerClient(file_path) timer.configure(client) with timer.expires(after=timeout): time.sleep(duration) @unittest.skipIf(TEST_WITH_TSAN, "test is tsan incompatible") def test_timer(self): timeout = 0.1 duration = 1 p = mp.Process(target=self._run, args=(self.file_path, timeout, duration)) p.start() p.join() self.assertEqual(-signal.SIGKILL, p.exitcode)
import multiprocessing as mp import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
setUp
def setUp(self): super().setUp() self.max_interval = 0.01 self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4()) self.server = timer.FileTimerServer(self.file_path, self.max_interval) self.server.start()
def setUp(self): super().setUp() self.max_interval = 0.01 self.file_path = f"/tmp/test_file_path_{os.getpid()}_{uuid.uuid4()}" self.server = timer.FileTimerServer( self.file_path, "test", self.max_interval ) self.server.start()
import multiprocessing as mp import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerTest(TestCase):
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
test_apply_activation_checkpointing
def test_apply_activation_checkpointing(self): """ Ensures that `apply_activation_checkpointing` can be used to swap modules for their checkpoint-wrapped counterparts given a model. """ class LinearWithBatchNorm(nn.Module): def __init__(self): super().__init__() self.lin = nn.Linear(10, 10) self.bn = nn.BatchNorm1d(10) self.nested_linear = nn.Sequential(nn.Linear(10, 10)) def forward(self, x): return self.bn(self.nested_linear(self.lin(x))) class MyModel(nn.Module): def __init__(self): super().__init__() self.seq = nn.Sequential( LinearWithBatchNorm(), LinearWithBatchNorm(), LinearWithBatchNorm() ) def forward(self, x): return self.seq(x) def check_fn(l): return isinstance(l, nn.Linear) n_linear = None for wrapper in [ partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.REENTRANT), partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT), offload_wrapper, ]: model = MyModel() if n_linear is None: n_linear = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) with self.subTest(wrapper=wrapper): apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, check_fn=check_fn ) n_linear_wrapped = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) n_checkpointed = sum( 1 if isinstance(x, (CheckpointWrapper, OffloadWrapper)) else 0 for x in model.modules() ) self.assertEqual(n_checkpointed, n_linear_wrapped) self.assertEqual(n_linear, n_linear_wrapped) for j in range(3): self.assertTrue( isinstance( model.seq[j].lin, (CheckpointWrapper, OffloadWrapper) ) ) self.assertTrue( isinstance( model.seq[j].nested_linear[0], (CheckpointWrapper, OffloadWrapper), ) ) inp = torch.randn(4, 10, requires_grad=True) for i in range(6): # Kwarg input loss = model(x=inp).sum() self.assertTrue(loss.requires_grad) loss.backward() # ensure checkpointed part of model has gradients for j in range(3): weight_lin = model.seq[j].lin._checkpoint_wrapped_module.weight bias_lin = model.seq[j].lin._checkpoint_wrapped_module.bias weight_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.weight ) bias_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.bias ) for param in [ weight_lin, bias_lin, weight_nested_lin, bias_nested_lin, ]: self.assertTrue(param.requires_grad) self.assertFalse(param.grad is None)
def test_apply_activation_checkpointing(self): """ Ensures that `apply_activation_checkpointing` can be used to swap modules for their checkpoint-wrapped counterparts given a model. """ class LinearWithBatchNorm(nn.Module): def __init__(self) -> None: super().__init__() self.lin = nn.Linear(10, 10) self.bn = nn.BatchNorm1d(10) self.nested_linear = nn.Sequential(nn.Linear(10, 10)) def forward(self, x): return self.bn(self.nested_linear(self.lin(x))) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( LinearWithBatchNorm(), LinearWithBatchNorm(), LinearWithBatchNorm() ) def forward(self, x): return self.seq(x) def check_fn(l): return isinstance(l, nn.Linear) n_linear = None for i, wrapper in enumerate( [ partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.REENTRANT), partial( checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT ), offload_wrapper, ] ): model = MyModel() if n_linear is None: n_linear = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) with self.subTest(wrapper=wrapper): if i != 0: apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, check_fn=check_fn ) else: apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, auto_wrap_policy=ModuleWrapPolicy({nn.Linear}), ) n_linear_wrapped = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) n_checkpointed = sum( 1 if isinstance(x, (CheckpointWrapper, OffloadWrapper)) else 0 for x in model.modules() ) self.assertEqual(n_checkpointed, n_linear_wrapped) self.assertEqual(n_linear, n_linear_wrapped) for j in range(3): self.assertTrue( isinstance( model.seq[j].lin, (CheckpointWrapper, OffloadWrapper) ) ) self.assertTrue( isinstance( model.seq[j].nested_linear[0], (CheckpointWrapper, OffloadWrapper), ) ) inp = torch.randn(4, 10, requires_grad=True) for i in range(6): # Kwarg input loss = model(x=inp).sum() self.assertTrue(loss.requires_grad) loss.backward() # ensure checkpointed part of model has gradients for j in range(3): weight_lin = model.seq[j].lin._checkpoint_wrapped_module.weight bias_lin = model.seq[j].lin._checkpoint_wrapped_module.bias weight_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.weight ) bias_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.bias ) for param in [ weight_lin, bias_lin, weight_nested_lin, bias_nested_lin, ]: self.assertTrue(param.requires_grad) self.assertFalse(param.grad is None)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class CheckpointWrapperTest(TestCase):
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class CheckpointWrapperTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
__init__
def __init__(self): super().__init__() self.lin = nn.Linear(10, 10)
def __init__(self) -> None: super().__init__() self.lin = nn.Linear(10, 10)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
__init__
def __init__(self): super().__init__() self.lin = nn.Linear(10, 10)
def __init__(self) -> None: super().__init__() self.lin = nn.Linear(10, 10)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
check_fn
def check_fn(l): return isinstance(l, nn.Linear) n_linear = None for wrapper in [ partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.REENTRANT), partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT), offload_wrapper, ]: model = MyModel() if n_linear is None: n_linear = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) with self.subTest(wrapper=wrapper): apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, check_fn=check_fn ) n_linear_wrapped = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) n_checkpointed = sum( 1 if isinstance(x, (CheckpointWrapper, OffloadWrapper)) else 0 for x in model.modules() ) self.assertEqual(n_checkpointed, n_linear_wrapped) self.assertEqual(n_linear, n_linear_wrapped) for j in range(3): self.assertTrue( isinstance( model.seq[j].lin, (CheckpointWrapper, OffloadWrapper) ) ) self.assertTrue( isinstance( model.seq[j].nested_linear[0], (CheckpointWrapper, OffloadWrapper), ) ) inp = torch.randn(4, 10, requires_grad=True) for i in range(6): # Kwarg input loss = model(x=inp).sum() self.assertTrue(loss.requires_grad) loss.backward() # ensure checkpointed part of model has gradients for j in range(3): weight_lin = model.seq[j].lin._checkpoint_wrapped_module.weight bias_lin = model.seq[j].lin._checkpoint_wrapped_module.bias weight_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.weight ) bias_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.bias ) for param in [ weight_lin, bias_lin, weight_nested_lin, bias_nested_lin, ]: self.assertTrue(param.requires_grad) self.assertFalse(param.grad is None)
def check_fn(l): return isinstance(l, nn.Linear) n_linear = None for i, wrapper in enumerate( [ partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.REENTRANT), partial( checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT ), offload_wrapper, ] ): model = MyModel() if n_linear is None: n_linear = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) with self.subTest(wrapper=wrapper): if i != 0: apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, check_fn=check_fn ) else: apply_activation_checkpointing( model, checkpoint_wrapper_fn=wrapper, auto_wrap_policy=ModuleWrapPolicy({nn.Linear}), ) n_linear_wrapped = sum( 1 if isinstance(x, nn.Linear) else 0 for x in model.modules() ) n_checkpointed = sum( 1 if isinstance(x, (CheckpointWrapper, OffloadWrapper)) else 0 for x in model.modules() ) self.assertEqual(n_checkpointed, n_linear_wrapped) self.assertEqual(n_linear, n_linear_wrapped) for j in range(3): self.assertTrue( isinstance( model.seq[j].lin, (CheckpointWrapper, OffloadWrapper) ) ) self.assertTrue( isinstance( model.seq[j].nested_linear[0], (CheckpointWrapper, OffloadWrapper), ) ) inp = torch.randn(4, 10, requires_grad=True) for i in range(6): # Kwarg input loss = model(x=inp).sum() self.assertTrue(loss.requires_grad) loss.backward() # ensure checkpointed part of model has gradients for j in range(3): weight_lin = model.seq[j].lin._checkpoint_wrapped_module.weight bias_lin = model.seq[j].lin._checkpoint_wrapped_module.bias weight_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.weight ) bias_nested_lin = ( model.seq[j] .nested_linear[0] ._checkpoint_wrapped_module.bias ) for param in [ weight_lin, bias_lin, weight_nested_lin, bias_nested_lin, ]: self.assertTrue(param.requires_grad) self.assertFalse(param.grad is None)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions"
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions"
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_backward_prefetch.py
world_size
def world_size(self): return 2
import sys from typing import List from unittest.mock import patch import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_handle_fqns_from_root from torch.distributed.fsdp._flat_param import HandleTrainingState from torch.distributed.fsdp._runtime_utils import ( _get_handle_to_prefetch, _get_training_state, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN NUM_ITERS = 2 DECODER_PARAM_FQNS = [ "decoder.layers.{index}.self_attn.in_proj_weight", "decoder.layers.{index}.self_attn.in_proj_bias", "decoder.layers.{index}.self_attn.out_proj.weight", "decoder.layers.{index}.self_attn.out_proj.bias", "decoder.layers.{index}.multihead_attn.in_proj_weight", "decoder.layers.{index}.multihead_attn.in_proj_bias", "decoder.layers.{index}.multihead_attn.out_proj.weight", "decoder.layers.{index}.multihead_attn.out_proj.bias", "decoder.layers.{index}.linear1.weight", "decoder.layers.{index}.linear1.bias", "decoder.layers.{index}.linear2.weight", "decoder.layers.{index}.linear2.bias", "decoder.layers.{index}.norm1.weight", "decoder.layers.{index}.norm1.bias", "decoder.layers.{index}.norm2.weight", "decoder.layers.{index}.norm2.bias", "decoder.layers.{index}.norm3.weight", "decoder.layers.{index}.norm3.bias", ] ENCODER_PARAM_FQNS = [ "encoder.layers.{index}.self_attn.in_proj_weight", "encoder.layers.{index}.self_attn.in_proj_bias", "encoder.layers.{index}.self_attn.out_proj.weight", "encoder.layers.{index}.self_attn.out_proj.bias", "encoder.layers.{index}.linear1.weight", "encoder.layers.{index}.linear1.bias", "encoder.layers.{index}.linear2.weight", "encoder.layers.{index}.linear2.bias", "encoder.layers.{index}.norm1.weight", "encoder.layers.{index}.norm1.bias", "encoder.layers.{index}.norm2.weight", "encoder.layers.{index}.norm2.bias", ] TOTAL_NUM_PREFETCH_FOR_PRE = 12 TOTAL_NUM_PREFETCH_FOR_POST = 11 ENCODER_BEGIN_INDEX_FOR_PRE = 6 ENCODER_BEGIN_INDEX_FOR_POST = 5 ENCODER_PREFETCH_NUM = 5 class TestBackwardPrefetch(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_backward_prefetch.py
patched_get_handle_to_prefetch
def patched_get_handle_to_prefetch(*args, **kwargs): handle = orig_get_handle_to_prefetch(*args, **kwargs) self.assertEqual( len(args), 2, "expect _get_handle_to_prefetch(state, current_handle)" ) state = args[0] current_handle = args[1] training_state = _get_training_state(current_handle) if ( training_state == HandleTrainingState.BACKWARD_PRE and state.backward_prefetch == BackwardPrefetch.BACKWARD_PRE ) or ( training_state == HandleTrainingState.BACKWARD_POST and state.backward_prefetch == BackwardPrefetch.BACKWARD_POST ): nonlocal all_handle_fqns # FQNs prefixed from the root module # state._exec_order_data.param_to_fqn fqns = _get_handle_fqns_from_root(state, handle) all_handle_fqns.append(fqns) return handle # flat params from prefetch handle should match # DECODER_PARAM_FQNS and ENCODER_PARAM_FQNS with patch( "torch.distributed.fsdp._runtime_utils._get_handle_to_prefetch", patched_get_handle_to_prefetch, ): for _ in range(NUM_ITERS): optim.zero_grad() loss = model(src, tgt).sum() loss.backward() optim.step() if backward_prefetch is None: self.assertEqual(len(all_handle_fqns), 0) continue elif backward_prefetch == BackwardPrefetch.BACKWARD_PRE: # state._exec_order_data.handles_post_forward_order # equals forward order # encoder 0...5 -> decoder 0...5 -> root # pre-backward hook order # root -> decoder 5...0 -> encoder 5...0 # prefetch order # decoder 5...0 -> encoder 5...0 -> None # None: when current_handle=encoder 0, # _get_handle_to_prefetch returns None # +1 is for the above None encoder_begin_index = ENCODER_BEGIN_INDEX_FOR_PRE self.assertEqual( len(all_handle_fqns), TOTAL_NUM_PREFETCH_FOR_PRE + 1 ) elif backward_prefetch == BackwardPrefetch.BACKWARD_POST: # state._exec_order_data.handles_post_forward_order # equals forward order (same as BACKWARD_PRE) # encoder 0...5 -> decoder 0...5 -> root # post-backward hook (AccumulateGrad) order # decoder 5, 4...0 -> encoder 5...0 -> root # prefetch order # decoder 4...0 -> encoder 5...0 -> None -> None # 1st None: when current_handle=encoder 0, # _get_handle_to_prefetch returns None # 2nd None: when current_handle=root, # get decoder 5 inside _get_handle_to_prefetch # but not needed since decoder 5 is computed already # +2 is for the above Nones encoder_begin_index = ENCODER_BEGIN_INDEX_FOR_POST self.assertEqual( len(all_handle_fqns), TOTAL_NUM_PREFETCH_FOR_POST + 2 ) # ith_prefetch: 0, 1st, 2nd, 3rd, 4th ... ith prefetch for ith_prefetch, fqns in enumerate(all_handle_fqns): if ith_prefetch >= 0 and ith_prefetch < encoder_begin_index: layer_index = encoder_begin_index - 1 - ith_prefetch self.assertEqual( fqns, [x.format(index=layer_index) for x in DECODER_PARAM_FQNS], ) elif ( ith_prefetch >= encoder_begin_index and ith_prefetch <= encoder_begin_index + ENCODER_PREFETCH_NUM ): layer_index = ( encoder_begin_index + ENCODER_PREFETCH_NUM - ith_prefetch ) self.assertEqual( fqns, [x.format(index=layer_index) for x in ENCODER_PARAM_FQNS], ) else: self.assertTrue(fqns is None) all_handle_fqns = []
import sys from typing import List from unittest.mock import patch import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_handle_fqns_from_root from torch.distributed.fsdp._flat_param import HandleTrainingState from torch.distributed.fsdp._runtime_utils import ( _get_handle_to_prefetch, _get_training_state, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN NUM_ITERS = 2 DECODER_PARAM_FQNS = [ "decoder.layers.{index}.self_attn.in_proj_weight", "decoder.layers.{index}.self_attn.in_proj_bias", "decoder.layers.{index}.self_attn.out_proj.weight", "decoder.layers.{index}.self_attn.out_proj.bias", "decoder.layers.{index}.multihead_attn.in_proj_weight", "decoder.layers.{index}.multihead_attn.in_proj_bias", "decoder.layers.{index}.multihead_attn.out_proj.weight", "decoder.layers.{index}.multihead_attn.out_proj.bias", "decoder.layers.{index}.linear1.weight", "decoder.layers.{index}.linear1.bias", "decoder.layers.{index}.linear2.weight", "decoder.layers.{index}.linear2.bias", "decoder.layers.{index}.norm1.weight", "decoder.layers.{index}.norm1.bias", "decoder.layers.{index}.norm2.weight", "decoder.layers.{index}.norm2.bias", "decoder.layers.{index}.norm3.weight", "decoder.layers.{index}.norm3.bias", ] ENCODER_PARAM_FQNS = [ "encoder.layers.{index}.self_attn.in_proj_weight", "encoder.layers.{index}.self_attn.in_proj_bias", "encoder.layers.{index}.self_attn.out_proj.weight", "encoder.layers.{index}.self_attn.out_proj.bias", "encoder.layers.{index}.linear1.weight", "encoder.layers.{index}.linear1.bias", "encoder.layers.{index}.linear2.weight", "encoder.layers.{index}.linear2.bias", "encoder.layers.{index}.norm1.weight", "encoder.layers.{index}.norm1.bias", "encoder.layers.{index}.norm2.weight", "encoder.layers.{index}.norm2.bias", ] TOTAL_NUM_PREFETCH_FOR_PRE = 12 TOTAL_NUM_PREFETCH_FOR_POST = 11 ENCODER_BEGIN_INDEX_FOR_PRE = 6 ENCODER_BEGIN_INDEX_FOR_POST = 5 ENCODER_PREFETCH_NUM = 5
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
func1
def func1(file_path): client = timer.FileTimerClient(file_path) timer.configure(client) expire = time.time() + 2 client.acquire("test_scope", expire) time.sleep(1) p = mp.Process(target=func1, args=(self.file_path,)) p.start() p.join() time.sleep(2) self.server.run_once() # Allows the server to process all requests mock_reap.assert_not_called() self.assertEqual(0, len(self.server._timers))
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
_request_on_interval
def _request_on_interval(file_path, n, interval, sem): """ enqueues ``n`` timer requests into ``mp_queue`` one element per interval seconds. Releases the given semaphore once before going to work. """ client = timer.FileTimerClient(file_path) sem.release() for i in range(0, n): client.acquire("test_scope", 0) time.sleep(interval) class FileTimerClientTest(TestCase): def test_send_request_without_server(self): client = timer.FileTimerClient("test_file") timer.configure(client) with self.assertRaises(BrokenPipeError): with timer.expires(after=0.1): time.sleep(0.1) class FileTimerServerTest(TestCase): def setUp(self): super().setUp() self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4()) self.max_interval = 0.01 self.server = timer.FileTimerServer(self.file_path, self.max_interval) def tearDown(self): super().tearDown() self.server.stop() def test_watchdog_call_count(self): """ checks that the watchdog function ran wait/interval +- 1 times """ self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog) self.server.start() test_pid = -3 client = timer.FileTimerClient(self.file_path) client._send_request(self._valid_timer(pid=test_pid, scope="test0")) wait = 0.1 time.sleep(wait) self.server.stop() watchdog_call_count = self.server._run_watchdog.call_count self.assertGreaterEqual( watchdog_call_count, int(wait / self.max_interval) - 1 ) self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1) def test_watchdog_empty_queue(self): """ checks that the watchdog can run on an empty pipe """ self.server.start() def _expired_timer(self, pid, scope): expired = time.time() - 60 return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=expired, signal=signal.SIGKILL) def _valid_timer(self, pid, scope): valid = time.time() + 60 return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=valid, signal=signal.SIGKILL) def _release_timer(self, pid, scope): return timer.FileTimerRequest(worker_pid=pid, scope_id=scope, expiration_time=-1) @mock.patch("os.kill") def test_expired_timers(self, mock_os_kill): """ tests that a single expired timer on a process should terminate the process and clean up all pending timers that was owned by the process """ self.server.start() test_pid = -3 client = timer.FileTimerClient(self.file_path) client._send_request(self._expired_timer(pid=test_pid, scope="test1")) client._send_request(self._valid_timer(pid=test_pid, scope="test2")) self.server.run_once() # Allows the server to process all requests self.assertEqual(0, len(self.server._timers)) mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL) @mock.patch("os.kill") def test_send_request_release(self, mock_os_kill): """ tests that: 1. a timer can be acquired then released (should not terminate process) 2. a timer can be vacuously released (e.g. 
no-op) """ self.server.start() client = timer.FileTimerClient(self.file_path) test_pid = -3 client._send_request(self._valid_timer(pid=test_pid, scope="test1")) client._send_request(self._release_timer(pid=test_pid, scope="test1")) client._send_request(self._release_timer(pid=test_pid, scope="test2")) self.assertEqual(0, len(self.server._timers)) mock_os_kill.assert_not_called() @mock.patch("os.kill") def test_valid_timers(self, mock_os_kill): """ tests that valid timers are processed correctly and the process is left alone """ self.server.start() client = timer.FileTimerClient(self.file_path) client._send_request(self._valid_timer(pid=-3, scope="test1")) client._send_request(self._valid_timer(pid=-3, scope="test2")) client._send_request(self._valid_timer(pid=-2, scope="test1")) client._send_request(self._valid_timer(pid=-2, scope="test2")) self.server.run_once() # Allows the server to process all requests self.assertEqual(4, len(self.server._timers)) self.assertTrue((-3, "test1") in self.server._timers) self.assertTrue((-3, "test2") in self.server._timers) self.assertTrue((-2, "test1") in self.server._timers) self.assertTrue((-2, "test2") in self.server._timers) mock_os_kill.assert_not_called()
def _request_on_interval(file_path, n, interval, sem):
    """
    enqueues ``n`` timer requests into ``mp_queue`` one element per
    interval seconds. Releases the given semaphore once before going to work.
    """
    client = timer.FileTimerClient(file_path)
    sem.release()
    for i in range(0, n):
        client.acquire("test_scope", 0)
        time.sleep(interval)


class FileTimerClientTest(TestCase):
    def test_send_request_without_server(self):
        client = timer.FileTimerClient("test_file")
        timer.configure(client)
        with self.assertRaises(BrokenPipeError):
            with timer.expires(after=0.1):
                time.sleep(0.1)


class FileTimerServerTest(TestCase):
    def setUp(self):
        super().setUp()
        self.file_path = f"/tmp/test_file_path_{os.getpid()}_{uuid.uuid4()}"
        self.max_interval = 0.01
        self.server = timer.FileTimerServer(
            self.file_path, "test", self.max_interval
        )

    def tearDown(self):
        super().tearDown()
        self.server.stop()

    def test_watchdog_call_count(self):
        """
        checks that the watchdog function ran wait/interval +- 1 times
        """
        self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
        self.server.start()
        test_pid = -3
        client = timer.FileTimerClient(self.file_path)
        client._send_request(self._valid_timer(pid=test_pid, scope="test0"))
        wait = 0.1
        time.sleep(wait)
        self.server.stop()
        watchdog_call_count = self.server._run_watchdog.call_count
        self.assertGreaterEqual(
            watchdog_call_count, int(wait / self.max_interval) - 1
        )
        self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)

    def test_watchdog_empty_queue(self):
        """
        checks that the watchdog can run on an empty pipe
        """
        self.server.start()

    def _expired_timer(self, pid, scope):
        expired = time.time() - 60
        return timer.FileTimerRequest(
            worker_pid=pid,
            scope_id=scope,
            expiration_time=expired,
            signal=signal.SIGKILL,
        )

    def _valid_timer(self, pid, scope):
        valid = time.time() + 60
        return timer.FileTimerRequest(
            worker_pid=pid,
            scope_id=scope,
            expiration_time=valid,
            signal=signal.SIGKILL,
        )

    def _release_timer(self, pid, scope):
        return timer.FileTimerRequest(
            worker_pid=pid, scope_id=scope, expiration_time=-1
        )

    @mock.patch("os.kill")
    @mock.patch(
        "torch.distributed.elastic.timer.file_based_local_timer.log_debug_info_for_expired_timers"
    )
    def test_expired_timers(self, mock_debug_info, mock_os_kill):
        """
        tests that a single expired timer on a process should terminate
        the process and clean up all pending timers that was owned by the process
        """
        self.server.start()
        test_pid = -3
        client = timer.FileTimerClient(self.file_path)
        client._send_request(self._expired_timer(pid=test_pid, scope="test1"))
        client._send_request(self._valid_timer(pid=test_pid, scope="test2"))
        self.server.run_once()  # Allows the server to process all requests
        self.assertEqual(0, len(self.server._timers))
        mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
        mock_debug_info.assert_called()

    @mock.patch("os.kill")
    def test_send_request_release(self, mock_os_kill):
        """
        tests that:
        1. a timer can be acquired then released (should not terminate process)
        2. a timer can be vacuously released (e.g. no-op)
        """
        self.server.start()
        client = timer.FileTimerClient(self.file_path)
        test_pid = -3
        client._send_request(self._valid_timer(pid=test_pid, scope="test1"))
        client._send_request(self._release_timer(pid=test_pid, scope="test1"))
        client._send_request(self._release_timer(pid=test_pid, scope="test2"))
        self.assertEqual(0, len(self.server._timers))
        mock_os_kill.assert_not_called()

    @mock.patch(
        "torch.distributed.elastic.timer.FileTimerServer.is_process_running"
    )
    @mock.patch("os.kill")
    def test_valid_timers(self, mock_os_kill, mock_pid_exists):
        """
        tests that valid timers are processed correctly and the process is left alone
        """
        self.server.start()
        mock_pid_exists.return_value = True
        client = timer.FileTimerClient(self.file_path)
        client._send_request(self._valid_timer(pid=-3, scope="test1"))
        client._send_request(self._valid_timer(pid=-3, scope="test2"))
        client._send_request(self._valid_timer(pid=-2, scope="test1"))
        client._send_request(self._valid_timer(pid=-2, scope="test2"))
        self.server.run_once()  # Allows the server to process all requests
        self.assertEqual(4, len(self.server._timers))
        self.assertTrue((-3, "test1") in self.server._timers)
        self.assertTrue((-3, "test2") in self.server._timers)
        self.assertTrue((-2, "test1") in self.server._timers)
        self.assertTrue((-2, "test2") in self.server._timers)
        mock_os_kill.assert_not_called()
import multiprocessing as mp import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
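A minimal end-to-end sketch of the file-based timer pieces exercised in the record above, assembled only from calls that appear in the recorded tests (FileTimerServer, FileTimerClient, FileTimerRequest, _send_request, run_once). The temp-file path and scope name below are made up, and the second positional argument to FileTimerServer ("test") is taken from the rewritten setUp and assumed to be a name/run identifier.

import os
import signal
import time
import uuid

import torch.distributed.elastic.timer as timer

# Hypothetical demo path; the tests use a similar /tmp path built from pid + uuid.
file_path = f"/tmp/file_timer_demo_{os.getpid()}_{uuid.uuid4()}"

# 0.01 is the max_interval the tests use; "test" mirrors the rewritten setUp (assumed name/run id).
server = timer.FileTimerServer(file_path, "test", 0.01)
server.start()

client = timer.FileTimerClient(file_path)
# Acquire a timer that expires far in the future, then release it, mirroring
# test_send_request_release above; nothing expires, so no process is killed.
client._send_request(
    timer.FileTimerRequest(
        worker_pid=os.getpid(),
        scope_id="demo_scope",
        expiration_time=time.time() + 60,
        signal=signal.SIGKILL,
    )
)
client._send_request(
    timer.FileTimerRequest(
        worker_pid=os.getpid(), scope_id="demo_scope", expiration_time=-1
    )
)
server.run_once()  # drain pending requests once, as the tests do
server.stop()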
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
setUp
def setUp(self):
    super().setUp()
    self.max_interval = 0.01
    self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4())
    self.server = timer.FileTimerServer(self.file_path, self.max_interval)
    self.server.start()
def setUp(self):
    super().setUp()
    self.max_interval = 0.01
    self.file_path = f"/tmp/test_file_path_{os.getpid()}_{uuid.uuid4()}"
    self.server = timer.FileTimerServer(
        self.file_path, "test", self.max_interval
    )
    self.server.start()
import multiprocessing as mp import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerTest(TestCase):
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/timer/file_based_local_timer_test.py
test_valid_timers
def test_valid_timers(self, mock_os_kill):
    """
    tests that valid timers are processed correctly and the process is left alone
    """
    self.server.start()
    client = timer.FileTimerClient(self.file_path)
    client._send_request(self._valid_timer(pid=-3, scope="test1"))
    client._send_request(self._valid_timer(pid=-3, scope="test2"))
    client._send_request(self._valid_timer(pid=-2, scope="test1"))
    client._send_request(self._valid_timer(pid=-2, scope="test2"))
    self.server.run_once()  # Allows the server to process all requests
    self.assertEqual(4, len(self.server._timers))
    self.assertTrue((-3, "test1") in self.server._timers)
    self.assertTrue((-3, "test2") in self.server._timers)
    self.assertTrue((-2, "test1") in self.server._timers)
    self.assertTrue((-2, "test2") in self.server._timers)
    mock_os_kill.assert_not_called()
def test_valid_timers(self, mock_os_kill, mock_pid_exists):
    """
    tests that valid timers are processed correctly and the process is left alone
    """
    self.server.start()
    mock_pid_exists.return_value = True
    client = timer.FileTimerClient(self.file_path)
    client._send_request(self._valid_timer(pid=-3, scope="test1"))
    client._send_request(self._valid_timer(pid=-3, scope="test2"))
    client._send_request(self._valid_timer(pid=-2, scope="test1"))
    client._send_request(self._valid_timer(pid=-2, scope="test2"))
    self.server.run_once()  # Allows the server to process all requests
    self.assertEqual(4, len(self.server._timers))
    self.assertTrue((-3, "test1") in self.server._timers)
    self.assertTrue((-3, "test2") in self.server._timers)
    self.assertTrue((-2, "test1") in self.server._timers)
    self.assertTrue((-2, "test2") in self.server._timers)
    mock_os_kill.assert_not_called()
import multiprocessing as mp import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerServerTest(TestCase):
import multiprocessing as mp import os import signal import time import unittest import unittest.mock as mock import uuid import torch.distributed.elastic.timer as timer from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class FileTimerServerTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/timer/local_timer_example.py
_stuck_function
def _stuck_function(rank, mp_queue): timer.configure(timer.LocalTimerClient(mp_queue)) with timer.expires(after=1): time.sleep(5) # timer is not supported on macos or windowns if not (IS_WINDOWS or IS_MACOS): class LocalTimerExample(TestCase): """ Demonstrates how to use LocalTimerServer and LocalTimerClient to enforce expiration of code-blocks. Since torch multiprocessing's ``start_process`` method currently does not take the multiprocessing context as parameter argument there is no way to create the mp.Queue in the correct context BEFORE spawning child processes. Once the ``start_process`` API is changed in torch, then re-enable ``test_torch_mp_example`` unittest. As of now this will SIGSEGV. """ @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible") def test_torch_mp_example(self): # in practice set the max_interval to a larger value (e.g. 60 seconds) mp_queue = mp.get_context("spawn").Queue() server = timer.LocalTimerServer(mp_queue, max_interval=0.01) server.start() world_size = 8 # all processes should complete successfully # since start_process does NOT take context as parameter argument yet # this method WILL FAIL (hence the test is disabled) torch_mp.spawn( fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True ) with self.assertRaises(Exception): # torch.multiprocessing.spawn kills all sub-procs # if one of them gets killed torch_mp.spawn( fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True ) server.stop() @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible") def test_example_start_method_spawn(self): self._run_example_with(start_method="spawn") # @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible") # def test_example_start_method_forkserver(self): # self._run_example_with(start_method="forkserver") def _run_example_with(self, start_method): spawn_ctx = mp.get_context(start_method) mp_queue = spawn_ctx.Queue() server = timer.LocalTimerServer(mp_queue, max_interval=0.01) server.start() world_size = 8 processes = [] for i in range(0, world_size): if i % 2 == 0: p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue)) else: p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue)) p.start() processes.append(p) for i in range(0, world_size): p = processes[i] p.join() if i % 2 == 0: self.assertEqual(-signal.SIGKILL, p.exitcode) else: self.assertEqual(0, p.exitcode) server.stop() if __name__ == "__main__": run_tests()
def _stuck_function(rank, mp_queue): timer.configure(timer.LocalTimerClient(mp_queue)) with timer.expires(after=1): time.sleep(5) # timer is not supported on macos or windows if not (IS_WINDOWS or IS_MACOS): class LocalTimerExample(TestCase): """ Demonstrates how to use LocalTimerServer and LocalTimerClient to enforce expiration of code-blocks. Since torch multiprocessing's ``start_process`` method currently does not take the multiprocessing context as parameter argument there is no way to create the mp.Queue in the correct context BEFORE spawning child processes. Once the ``start_process`` API is changed in torch, then re-enable ``test_torch_mp_example`` unittest. As of now this will SIGSEGV. """ @skip_but_pass_in_sandcastle_if( TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible" ) def test_torch_mp_example(self): # in practice set the max_interval to a larger value (e.g. 60 seconds) mp_queue = mp.get_context("spawn").Queue() server = timer.LocalTimerServer(mp_queue, max_interval=0.01) server.start() world_size = 8 # all processes should complete successfully # since start_process does NOT take context as parameter argument yet # this method WILL FAIL (hence the test is disabled) torch_mp.spawn( fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True ) with self.assertRaises(Exception): # torch.multiprocessing.spawn kills all sub-procs # if one of them gets killed torch_mp.spawn( fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True ) server.stop() @skip_but_pass_in_sandcastle_if( TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible" ) def test_example_start_method_spawn(self): self._run_example_with(start_method="spawn") # @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible") # def test_example_start_method_forkserver(self): # self._run_example_with(start_method="forkserver") def _run_example_with(self, start_method): spawn_ctx = mp.get_context(start_method) mp_queue = spawn_ctx.Queue() server = timer.LocalTimerServer(mp_queue, max_interval=0.01) server.start() world_size = 8 processes = [] for i in range(0, world_size): if i % 2 == 0: p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue)) else: p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue)) p.start() processes.append(p) for i in range(0, world_size): p = processes[i] p.join() if i % 2 == 0: self.assertEqual(-signal.SIGKILL, p.exitcode) else: self.assertEqual(0, p.exitcode) server.stop() if __name__ == "__main__": run_tests()
import logging import multiprocessing as mp import signal import time import torch.distributed.elastic.timer as timer import torch.multiprocessing as torch_mp from torch.testing._internal.common_utils import ( TEST_WITH_DEV_DBG_ASAN, run_tests, IS_WINDOWS, IS_MACOS, sandcastle_skip_if, TestCase )
import logging import multiprocessing as mp import signal import time import torch.distributed.elastic.timer as timer import torch.multiprocessing as torch_mp from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, skip_but_pass_in_sandcastle_if, TEST_WITH_DEV_DBG_ASAN, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/distributed_test.py
test_create_store_with_libuv_support
def test_create_store_with_libuv_support(self):
    world_size = 1
    wait_for_workers = False
    localhost = socket.gethostname()

    os.environ["USE_LIBUV"] = "0"
    store = create_c10d_store(
        is_server=True,
        server_addr=localhost,
        server_port=0,
        timeout=2,
        world_size=world_size,
        wait_for_workers=wait_for_workers,
    )
    self.assertFalse(store.libuvBackend)
    del os.environ["USE_LIBUV"]

    assert "USE_LIBUV" not in os.environ
    # libuv backend is enabled by default
    store = create_c10d_store(
        is_server=True,
        server_addr=localhost,
        server_port=0,
        timeout=2,
        world_size=world_size,
        wait_for_workers=wait_for_workers,
    )
    self.assertTrue(store.libuvBackend)
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed import DistNetworkError, DistStoreError from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class DistributedUtilTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/distributed_test.py
test_port_already_in_use_on_worker
def test_port_already_in_use_on_worker(self):
    sock = get_socket_with_port()
    with closing(sock):
        port = sock.getsockname()[1]
        # on the worker port conflict shouldn't matter, it should just timeout
        # since we never created a server
        with self.assertRaises(TimeoutError):
            create_c10d_store(
                is_server=False,
                server_addr=socket.gethostname(),
                server_port=port,
                timeout=1,
            )
def test_port_already_in_use_on_worker(self):
    sock = get_socket_with_port()
    with closing(sock):
        port = sock.getsockname()[1]
        # on the worker port conflict shouldn't matter, it should just timeout
        # since we never created a server
        with self.assertRaises(DistNetworkError):
            create_c10d_store(
                is_server=False,
                server_addr=socket.gethostname(),
                server_port=port,
                timeout=1,
            )
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase ) class DistributedUtilTest(TestCase):
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed import DistNetworkError, DistStoreError from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class DistributedUtilTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/util_test.py
test_get_all_rank_0
def test_get_all_rank_0(self):
    store = mock.MagicMock()
    world_size = 3
    store_util.get_all(store, 0, "test/store", world_size)
    # omit empty kwargs, get only key
    actual_set_call_args = [
        call_args[0][0] for call_args in store.set.call_args_list
    ]
    self.assertListEqual(["test/store0.FIN"], actual_set_call_args)

    actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
    expected_get_call_args = [
        ("test/store0",),
        ("test/store1",),
        ("test/store2",),
        ("test/store0.FIN",),
        ("test/store1.FIN",),
        ("test/store2.FIN",),
    ]
    self.assertListEqual(expected_get_call_args, actual_get_call_args)
def test_get_all_rank_0(self):
    world_size = 3
    store = MockStore()

    store_util.get_all(store, 0, "test/store", world_size)

    self.assertListEqual(
        store.ops,
        [
            ("multi_get", ["test/store0", "test/store1", "test/store2"]),
            ("add", "test/store/finished/num_members", 1),
            ("set", "test/store/finished/last_member", "<val_ignored>"),
            ("wait", ["test/store/finished/last_member"]),
        ],
    )
from unittest import mock import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase):
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
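The rewritten store tests in this and the following records assert against a MockStore helper that is not captured in the dataset. A hypothetical sketch of what such a helper might look like, inferred purely from the op tuples the assertions expect; the class name aside, the _TEST_TIMEOUT value, return values, and method details are assumptions.

import datetime
from typing import List, Tuple


class MockStore:
    _TEST_TIMEOUT = 10  # assumed default, surfaced through the `timeout` property

    def __init__(self) -> None:
        self.ops: List[Tuple] = []  # every call is recorded here for the assertions

    @property
    def timeout(self) -> datetime.timedelta:
        self.ops.append(("timeout",))
        return datetime.timedelta(seconds=self._TEST_TIMEOUT)

    def set_timeout(self, timeout: datetime.timedelta) -> None:
        self.ops.append(("set_timeout", timeout))

    def set(self, key: str, value) -> None:
        # The assertions show "<val_ignored>" where the recorded value does not
        # matter (e.g. the last_member marker); the real helper presumably
        # substitutes that marker, which this sketch omits for brevity.
        self.ops.append(("set", key, value))

    def multi_get(self, keys: List[str]) -> List[bytes]:
        self.ops.append(("multi_get", keys))
        return [b"" for _ in keys]  # placeholder payloads

    def add(self, key: str, num: int) -> int:
        self.ops.append(("add", key, num))
        return num

    def wait(self, keys: List[str]) -> None:
        self.ops.append(("wait", keys))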
torch
test/distributed/elastic/utils/util_test.py
test_get_all_rank_n
def test_get_all_rank_n(self):
    store = mock.MagicMock()
    world_size = 3
    store_util.get_all(store, 1, "test/store", world_size)
    # omit empty kwargs, get only key
    actual_set_call_args = [
        call_args[0][0] for call_args in store.set.call_args_list
    ]
    self.assertListEqual(["test/store1.FIN"], actual_set_call_args)

    actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
    expected_get_call_args = [
        ("test/store0",),
        ("test/store1",),
        ("test/store2",),
    ]
    self.assertListEqual(expected_get_call_args, actual_get_call_args)
def test_get_all_rank_n(self):
    store = MockStore()
    world_size = 3

    store_util.get_all(store, 1, "test/store", world_size)

    self.assertListEqual(
        store.ops,
        [
            ("multi_get", ["test/store0", "test/store1", "test/store2"]),
            ("add", "test/store/finished/num_members", 1),
            ("set", "test/store/finished/last_member", "<val_ignored>"),
        ],
    )
from unittest import mock import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase):
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/util_test.py
test_synchronize
def test_synchronize(self):
    store_mock = mock.MagicMock()
    data = "data0".encode(encoding="UTF-8")

    store_util.synchronize(store_mock, data, 0, 3, key_prefix="torchelastic/test")

    actual_set_call_args = store_mock.set.call_args_list
    # omit empty kwargs
    actual_set_call_args = [call_args[0] for call_args in actual_set_call_args]
    expected_set_call_args = [
        ("torchelastic/test0", b"data0"),
        ("torchelastic/test0.FIN", b"FIN"),
    ]
    self.assertListEqual(expected_set_call_args, actual_set_call_args)

    expected_get_call_args = [
        ("torchelastic/test0",),
        ("torchelastic/test1",),
        ("torchelastic/test2",),
        ("torchelastic/test0.FIN",),
        ("torchelastic/test1.FIN",),
        ("torchelastic/test2.FIN",),
    ]
    actual_get_call_args = store_mock.get.call_args_list
    actual_get_call_args = [call_args[0] for call_args in actual_get_call_args]
    self.assertListEqual(expected_get_call_args, actual_get_call_args)
def test_synchronize(self):
    store = MockStore()
    data = b"data0"

    store_util.synchronize(store, data, 0, 3, key_prefix="test/store")

    self.assertListEqual(
        store.ops,
        [
            ("timeout",),
            ("set_timeout", datetime.timedelta(seconds=300)),
            ("set", "test/store0", data),
            ("multi_get", ["test/store0", "test/store1", "test/store2"]),
            ("add", "test/store/finished/num_members", 1),
            ("set", "test/store/finished/last_member", "<val_ignored>"),
            ("wait", ["test/store/finished/last_member"]),
            ("set_timeout", datetime.timedelta(seconds=store._TEST_TIMEOUT)),
        ],
    )
from unittest import mock import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase):
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/util_test.py
f
def f(i: int):
    return store_util.synchronize(
        store, f"data{i}", i, N, key_prefix="test/store"
    )

with ThreadPool(N) as pool:
    out = pool.map(f, range(N))

self.assertListEqual(out, [[f"data{i}".encode() for i in range(N)]] * N)
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/distributed_test.py
_create_c10d_store_mp
def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_workers): store = create_c10d_store(is_server, server_addr, port, world_size, wait_for_workers=wait_for_workers, timeout=2) if store is None: raise AssertionError() store.set(f"test_key/{os.getpid()}", "test_value".encode("UTF-8")) if IS_WINDOWS or IS_MACOS: print("tests incompatible with tsan or asan", file=sys.stderr) sys.exit(0) class DistributedUtilTest(TestCase): def test_create_store_single_server(self): store = create_c10d_store(is_server=True, server_addr=socket.gethostname()) self.assertIsNotNone(store) def test_create_store_no_port_multi(self): with self.assertRaises(ValueError): create_c10d_store( is_server=True, server_addr=socket.gethostname(), world_size=2 ) @unittest.skipIf(TEST_WITH_TSAN, "test incompatible with tsan") def test_create_store_multi(self): world_size = 3 wait_for_workers = False localhost = socket.gethostname() # start the server on the main process using an available port store = create_c10d_store( is_server=True, server_addr=localhost, server_port=0, timeout=2, world_size=world_size, wait_for_workers=wait_for_workers, ) # worker processes will use the port that was assigned to the server server_port = store.port worker0 = mp.Process( target=_create_c10d_store_mp, args=(False, localhost, server_port, world_size, wait_for_workers), ) worker1 = mp.Process( target=_create_c10d_store_mp, args=(False, localhost, server_port, world_size, wait_for_workers), ) worker0.start() worker1.start() worker0.join() worker1.join() # check test_key/pid == "test_value" self.assertEqual( "test_value", store.get(f"test_key/{worker0.pid}").decode("UTF-8") ) self.assertEqual( "test_value", store.get(f"test_key/{worker1.pid}").decode("UTF-8") ) self.assertEqual(0, worker0.exitcode) self.assertEqual(0, worker1.exitcode) def test_create_store_timeout_on_server(self): with self.assertRaises(TimeoutError): # use any available port (port 0) since timeout is expected create_c10d_store( is_server=True, server_addr=socket.gethostname(), server_port=0, world_size=2, timeout=1, ) def test_create_store_timeout_on_worker(self): with self.assertRaises(TimeoutError): # use any available port (port 0) since timeout is expected create_c10d_store( is_server=False, server_addr=socket.gethostname(), server_port=0, world_size=2, timeout=1, ) def test_port_already_in_use_on_server(self): # try to create the TCPStore server twice on the same port # the second should fail due to a port conflict # first store binds onto a free port # try creating the second store on the port that the first store binded to server_addr = socket.gethostname() pick_free_port = 0 store1 = create_c10d_store( is_server=True, server_addr=server_addr, server_port=pick_free_port, timeout=1, ) with self.assertRaises(RuntimeError): create_c10d_store( is_server=True, server_addr=server_addr, server_port=store1.port ) def test_port_already_in_use_on_worker(self): sock = get_socket_with_port() with closing(sock): port = sock.getsockname()[1] # on the worker port conflict shouldn't matter, it should just timeout # since we never created a server with self.assertRaises(TimeoutError): create_c10d_store( is_server=False, server_addr=socket.gethostname(), server_port=port, timeout=1, ) if __name__ == "__main__": run_tests()
def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_workers):
    store = create_c10d_store(
        is_server,
        server_addr,
        port,
        world_size,
        wait_for_workers=wait_for_workers,
        timeout=2,
    )
    if store is None:
        raise AssertionError

    store.set(f"test_key/{os.getpid()}", b"test_value")


if IS_WINDOWS or IS_MACOS:
    print("tests incompatible with tsan or asan", file=sys.stderr)
    sys.exit(0)


class DistributedUtilTest(TestCase):
    def test_create_store_single_server(self):
        store = create_c10d_store(is_server=True, server_addr=socket.gethostname())
        self.assertIsNotNone(store)

    def test_create_store_no_port_multi(self):
        with self.assertRaises(ValueError):
            create_c10d_store(
                is_server=True, server_addr=socket.gethostname(), world_size=2
            )

    @unittest.skipIf(TEST_WITH_TSAN, "test incompatible with tsan")
    def test_create_store_multi(self):
        world_size = 3
        wait_for_workers = False
        localhost = socket.gethostname()

        # start the server on the main process using an available port
        store = create_c10d_store(
            is_server=True,
            server_addr=localhost,
            server_port=0,
            timeout=2,
            world_size=world_size,
            wait_for_workers=wait_for_workers,
        )

        # worker processes will use the port that was assigned to the server
        server_port = store.port

        worker0 = mp.Process(
            target=_create_c10d_store_mp,
            args=(False, localhost, server_port, world_size, wait_for_workers),
        )
        worker1 = mp.Process(
            target=_create_c10d_store_mp,
            args=(False, localhost, server_port, world_size, wait_for_workers),
        )

        worker0.start()
        worker1.start()
        worker0.join()
        worker1.join()

        # check test_key/pid == "test_value"
        self.assertEqual(
            "test_value", store.get(f"test_key/{worker0.pid}").decode("UTF-8")
        )
        self.assertEqual(
            "test_value", store.get(f"test_key/{worker1.pid}").decode("UTF-8")
        )

        self.assertEqual(0, worker0.exitcode)
        self.assertEqual(0, worker1.exitcode)

    def test_create_store_timeout_on_server(self):
        with self.assertRaises(DistStoreError):
            # use any available port (port 0) since timeout is expected
            create_c10d_store(
                is_server=True,
                server_addr=socket.gethostname(),
                server_port=0,
                world_size=2,
                timeout=1,
            )

    def test_create_store_timeout_on_worker(self):
        with self.assertRaises(DistNetworkError):
            # use any available port (port 0) since timeout is expected
            create_c10d_store(
                is_server=False,
                server_addr=socket.gethostname(),
                server_port=0,
                world_size=2,
                timeout=1,
            )

    def test_create_store_with_libuv_support(self):
        world_size = 1
        wait_for_workers = False
        localhost = socket.gethostname()

        os.environ["USE_LIBUV"] = "0"
        store = create_c10d_store(
            is_server=True,
            server_addr=localhost,
            server_port=0,
            timeout=2,
            world_size=world_size,
            wait_for_workers=wait_for_workers,
        )
        self.assertFalse(store.libuvBackend)
        del os.environ["USE_LIBUV"]

        assert "USE_LIBUV" not in os.environ
        # libuv backend is enabled by default
        store = create_c10d_store(
            is_server=True,
            server_addr=localhost,
            server_port=0,
            timeout=2,
            world_size=world_size,
            wait_for_workers=wait_for_workers,
        )
        self.assertTrue(store.libuvBackend)

    def test_port_already_in_use_on_server(self):
        # try to create the TCPStore server twice on the same port
        # the second should fail due to a port conflict
        # first store binds onto a free port
        # try creating the second store on the port that the first store binded to
        server_addr = socket.gethostname()
        pick_free_port = 0
        store1 = create_c10d_store(
            is_server=True,
            server_addr=server_addr,
            server_port=pick_free_port,
            timeout=1,
        )
        with self.assertRaises(RuntimeError):
            create_c10d_store(
                is_server=True, server_addr=server_addr, server_port=store1.port
            )

    def test_port_already_in_use_on_worker(self):
        sock = get_socket_with_port()
        with closing(sock):
            port = sock.getsockname()[1]
            # on the worker port conflict shouldn't matter, it should just timeout
            # since we never created a server
            with self.assertRaises(DistNetworkError):
                create_c10d_store(
                    is_server=False,
                    server_addr=socket.gethostname(),
                    server_port=port,
                    timeout=1,
                )


if __name__ == "__main__":
    run_tests()
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase )
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed import DistNetworkError, DistStoreError from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/distributed_test.py
test_create_store_timeout_on_server
def test_create_store_timeout_on_server(self):
    with self.assertRaises(TimeoutError):
        # use any available port (port 0) since timeout is expected
        create_c10d_store(
            is_server=True,
            server_addr=socket.gethostname(),
            server_port=0,
            world_size=2,
            timeout=1,
        )
def test_create_store_timeout_on_server(self):
    with self.assertRaises(DistStoreError):
        # use any available port (port 0) since timeout is expected
        create_c10d_store(
            is_server=True,
            server_addr=socket.gethostname(),
            server_port=0,
            world_size=2,
            timeout=1,
        )
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase ) class DistributedUtilTest(TestCase):
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed import DistNetworkError, DistStoreError from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class DistributedUtilTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/elastic/utils/distributed_test.py
test_create_store_timeout_on_worker
def test_create_store_timeout_on_worker(self):
    with self.assertRaises(TimeoutError):
        # use any available port (port 0) since timeout is expected
        create_c10d_store(
            is_server=False,
            server_addr=socket.gethostname(),
            server_port=0,
            world_size=2,
            timeout=1,
        )
def test_create_store_timeout_on_worker(self):
    with self.assertRaises(DistNetworkError):
        # use any available port (port 0) since timeout is expected
        create_c10d_store(
            is_server=False,
            server_addr=socket.gethostname(),
            server_port=0,
            world_size=2,
            timeout=1,
        )
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase ) class DistributedUtilTest(TestCase):
import multiprocessing as mp import os import socket import sys import unittest from contextlib import closing from torch.distributed import DistNetworkError, DistStoreError from torch.distributed.elastic.utils.distributed import ( create_c10d_store, get_socket_with_port, ) from torch.testing._internal.common_utils import ( IS_MACOS, IS_WINDOWS, run_tests, TEST_WITH_TSAN, TestCase, ) class DistributedUtilTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_core.py
test_register_functions_called
def test_register_functions_called(self, cuda_first: bool, mixed_precision: bool): """Tests that ``_register_{pre|post}_backward_hooks()`` are called during the FSDP forward.""" fsdp_kwargs = {} if mixed_precision: fsdp_kwargs["mixed_precision"] = MixedPrecision() fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER, fsdp_kwargs, ) input = fsdp_model.module.get_input(torch.device("cuda")) # Since `_register_pre_backward_hooks()` modifies the forward output, # we cannot directly mock it. We implement our own counter instead. orig_register_pre_backward_hooks = ( torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks ) register_pre_backward_hooks_call_count = 0 def _register_pre_backward_hooks_with_count(*args, **kwargs): nonlocal register_pre_backward_hooks_call_count register_pre_backward_hooks_call_count += 1 return orig_register_pre_backward_hooks(*args, **kwargs) with mock.patch( "torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks", _register_pre_backward_hooks_with_count, ), mock.patch( "torch.distributed.fsdp._runtime_utils._register_post_backward_hooks" ) as register_post_bwd_mock: self.assertEqual(register_pre_backward_hooks_call_count, 0) self.assertFalse(register_post_bwd_mock.called) fsdp_model(*input) self.assertTrue(register_pre_backward_hooks_call_count > 0) self.assertTrue(register_post_bwd_mock.called)
def test_register_functions_called(self, cuda_first: bool, mixed_precision: bool): """Tests that ``_register_{pre|post}_backward_hooks()`` are called during the FSDP forward.""" fsdp_kwargs = {} if mixed_precision: fsdp_kwargs["mixed_precision"] = MixedPrecision() fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER, fsdp_kwargs, ) input = fsdp_model.module.get_input(torch.device("cuda")) # Since `_register_pre_backward_hooks()` modifies the forward output, # we cannot directly mock it. We implement our own counter instead. orig_register_pre_backward_hooks = ( torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks ) register_pre_backward_hooks_call_count = 0 def _register_pre_backward_hooks_with_count(*args, **kwargs): nonlocal register_pre_backward_hooks_call_count register_pre_backward_hooks_call_count += 1 return orig_register_pre_backward_hooks(*args, **kwargs) with mock.patch( "torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks", _register_pre_backward_hooks_with_count, ), mock.patch( "torch.distributed.fsdp._runtime_utils._register_post_backward_hook" ) as register_post_bwd_mock: self.assertEqual(register_pre_backward_hooks_call_count, 0) self.assertFalse(register_post_bwd_mock.called) fsdp_model(*input) self.assertTrue(register_pre_backward_hooks_call_count > 0) self.assertTrue(register_post_bwd_mock.called)
import functools import itertools import sys from typing import Any, Dict, List, Optional from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, ShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( AlwaysWrapNestedWrappedModule, CUDAInitMode, DummyDDP, FSDPInitMode, FSDPTest, MixtureOfExperts, NestedWrappedModule, NestedWrappedModuleWithDelay, subtest_name, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) params = "cpu_offload,sharding_strategy" cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)] sharding_strategy_config = [ None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ] configs = list(itertools.product(cpu_offload_config, sharding_strategy_config)) test_name_mapping = { str(CPUOffload(offload_params=True)): "offload_true", str(CPUOffload(offload_params=False)): "offload_false", str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op", str(ShardingStrategy.NO_SHARD): "no_shard", } subtest_name = functools.partial(subtest_name, test_name_mapping) class TestHooks(FSDPTest):
import contextlib import functools import itertools import sys from typing import Any, Callable, Dict, List, Optional from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp._flat_param import FlatParamHandle from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.distributed.utils import _p_assert from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( AlwaysWrapNestedWrappedModule, CUDAInitMode, DummyDDP, FSDPInitMode, FSDPTest, MixtureOfExperts, NestedWrappedModule, NestedWrappedModuleWithDelay, subtest_name, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) params = "cpu_offload,sharding_strategy" cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)] sharding_strategy_config = [ None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ] configs = list(itertools.product(cpu_offload_config, sharding_strategy_config)) test_name_mapping = { str(CPUOffload(offload_params=True)): "offload_true", str(CPUOffload(offload_params=False)): "offload_false", str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op", str(ShardingStrategy.NO_SHARD): "no_shard", } subtest_name = functools.partial(subtest_name, test_name_mapping) class TestHooks(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_core.py
_register_pre_backward_hooks_with_count
def _register_pre_backward_hooks_with_count(*args, **kwargs):
    nonlocal register_pre_backward_hooks_call_count
    register_pre_backward_hooks_call_count += 1
    return orig_register_pre_backward_hooks(*args, **kwargs)

with mock.patch(
    "torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks",
    _register_pre_backward_hooks_with_count,
), mock.patch(
    "torch.distributed.fsdp._runtime_utils._register_post_backward_hooks"
) as register_post_bwd_mock:
    self.assertEqual(register_pre_backward_hooks_call_count, 0)
    self.assertFalse(register_post_bwd_mock.called)
    fsdp_model(*input)
    self.assertTrue(register_pre_backward_hooks_call_count > 0)
    self.assertTrue(register_post_bwd_mock.called)
def _register_pre_backward_hooks_with_count(*args, **kwargs):
    nonlocal register_pre_backward_hooks_call_count
    register_pre_backward_hooks_call_count += 1
    return orig_register_pre_backward_hooks(*args, **kwargs)

with mock.patch(
    "torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks",
    _register_pre_backward_hooks_with_count,
), mock.patch(
    "torch.distributed.fsdp._runtime_utils._register_post_backward_hook"
) as register_post_bwd_mock:
    self.assertEqual(register_pre_backward_hooks_call_count, 0)
    self.assertFalse(register_post_bwd_mock.called)
    fsdp_model(*input)
    self.assertTrue(register_pre_backward_hooks_call_count > 0)
    self.assertTrue(register_post_bwd_mock.called)
import functools import itertools import sys from typing import Any, Dict, List, Optional from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, ShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( AlwaysWrapNestedWrappedModule, CUDAInitMode, DummyDDP, FSDPInitMode, FSDPTest, MixtureOfExperts, NestedWrappedModule, NestedWrappedModuleWithDelay, subtest_name, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) params = "cpu_offload,sharding_strategy" cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)] sharding_strategy_config = [ None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ] configs = list(itertools.product(cpu_offload_config, sharding_strategy_config)) test_name_mapping = { str(CPUOffload(offload_params=True)): "offload_true", str(CPUOffload(offload_params=False)): "offload_false", str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op", str(ShardingStrategy.NO_SHARD): "no_shard", } subtest_name = functools.partial(subtest_name, test_name_mapping)
import contextlib import functools import itertools import sys from typing import Any, Callable, Dict, List, Optional from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp._flat_param import FlatParamHandle from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.distributed.utils import _p_assert from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( AlwaysWrapNestedWrappedModule, CUDAInitMode, DummyDDP, FSDPInitMode, FSDPTest, MixtureOfExperts, NestedWrappedModule, NestedWrappedModuleWithDelay, subtest_name, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) params = "cpu_offload,sharding_strategy" cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)] sharding_strategy_config = [ None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ] configs = list(itertools.product(cpu_offload_config, sharding_strategy_config)) test_name_mapping = { str(CPUOffload(offload_params=True)): "offload_true", str(CPUOffload(offload_params=False)): "offload_false", str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op", str(ShardingStrategy.NO_SHARD): "no_shard", } subtest_name = functools.partial(subtest_name, test_name_mapping)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_core.py
_patch_use_unsharded_views
instantiate_parametrized_tests(TestHooks)
instantiate_parametrized_tests(TestParityWithDDP)
instantiate_parametrized_tests(TestNoGrad)
instantiate_parametrized_tests(TestParamInit)

if __name__ == "__main__":
    run_tests()
def _patch_use_unsharded_views(self, new_use_unsharded_views: Callable):
    orig_use_unsharded_views = FlatParamHandle._use_unsharded_views
    FlatParamHandle._use_unsharded_views = new_use_unsharded_views
    try:
        yield
    finally:
        FlatParamHandle._use_unsharded_views = orig_use_unsharded_views
import contextlib import functools import itertools import sys from typing import Any, Callable, Dict, List, Optional from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import CPUOffload, MixedPrecision from torch.distributed.fsdp._flat_param import FlatParamHandle from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.distributed.utils import _p_assert from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( AlwaysWrapNestedWrappedModule, CUDAInitMode, DummyDDP, FSDPInitMode, FSDPTest, MixtureOfExperts, NestedWrappedModule, NestedWrappedModuleWithDelay, subtest_name, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) params = "cpu_offload,sharding_strategy" cpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)] sharding_strategy_config = [ None, ShardingStrategy.SHARD_GRAD_OP, ShardingStrategy.NO_SHARD, ] configs = list(itertools.product(cpu_offload_config, sharding_strategy_config)) test_name_mapping = { str(CPUOffload(offload_params=True)): "offload_true", str(CPUOffload(offload_params=False)): "offload_false", str(ShardingStrategy.SHARD_GRAD_OP): "shard_grad_op", str(ShardingStrategy.NO_SHARD): "no_shard", } subtest_name = functools.partial(subtest_name, test_name_mapping) class TestAutograd(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
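The _patch_use_unsharded_views helper above is a generator-style patcher for a FlatParamHandle method; in the full file it is presumably wrapped with @contextlib.contextmanager (the surrounding context does import contextlib). A small self-contained sketch of the same monkey-patch-and-restore pattern, using a made-up class rather than FSDP internals:

import contextlib


class Greeter:
    def greet(self):
        return "hello"


@contextlib.contextmanager
def patch_greet(new_greet):
    # Swap the method on the class, then restore it on exit even if the body raises.
    orig = Greeter.greet
    Greeter.greet = new_greet
    try:
        yield
    finally:
        Greeter.greet = orig


with patch_greet(lambda self: "patched"):
    assert Greeter().greet() == "patched"
assert Greeter().greet() == "hello"  # original behavior restored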
torch
test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
forward
def forward(self, x):
    return self.net4(self.net3(self.net2(self.net1(x))))
import io from copy import deepcopy import torch import torch.nn as nn from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed._tensor import DTensor, Shard from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ( ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, skip_if_lt_x_gpu, with_comms, ) class TestDummyModel(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
get_input
def get_input(self):
    return torch.rand(8, 8, device="cuda")
import io from copy import deepcopy import torch import torch.nn as nn from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed._tensor import DTensor, Shard from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ( ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, skip_if_lt_x_gpu, with_comms, ) class TestDummyModel(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
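Only forward and get_input of TestDummyModel are captured in the two records above; the layer definitions are not part of the dataset. A hypothetical definition consistent with those two methods, where the 8-wide Linear layers are an assumption based on the 8x8 input:

import torch
import torch.nn as nn


class TestDummyModel(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Widths are assumed: get_input() returns an 8x8 batch, so 8-in/8-out
        # Linear layers keep the chained call in forward() shape-compatible.
        self.net1 = nn.Linear(8, 8)
        self.net2 = nn.Linear(8, 8)
        self.net3 = nn.Linear(8, 8)
        self.net4 = nn.Linear(8, 8)

    def forward(self, x):
        return self.net4(self.net3(self.net2(self.net1(x))))

    def get_input(self):
        return torch.rand(8, 8, device="cuda")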
torch
test/distributed/fsdp/test_fsdp_backward_prefetch.py
test_backward_prefetch
def test_backward_prefetch(self):
    # subtest reuse process group to shorten test time
    self.run_subtests(
        {
            "backward_prefetch": [
                None,
                BackwardPrefetch.BACKWARD_PRE,
                BackwardPrefetch.BACKWARD_POST,
            ],
        },
        self._test_backward_prefetch,
    )
import sys from typing import List from unittest.mock import patch import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_handle_fqns_from_root from torch.distributed.fsdp._flat_param import HandleTrainingState from torch.distributed.fsdp._runtime_utils import ( _get_handle_to_prefetch, _get_training_state, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN NUM_ITERS = 2 DECODER_PARAM_FQNS = [ "decoder.layers.{index}.self_attn.in_proj_weight", "decoder.layers.{index}.self_attn.in_proj_bias", "decoder.layers.{index}.self_attn.out_proj.weight", "decoder.layers.{index}.self_attn.out_proj.bias", "decoder.layers.{index}.multihead_attn.in_proj_weight", "decoder.layers.{index}.multihead_attn.in_proj_bias", "decoder.layers.{index}.multihead_attn.out_proj.weight", "decoder.layers.{index}.multihead_attn.out_proj.bias", "decoder.layers.{index}.linear1.weight", "decoder.layers.{index}.linear1.bias", "decoder.layers.{index}.linear2.weight", "decoder.layers.{index}.linear2.bias", "decoder.layers.{index}.norm1.weight", "decoder.layers.{index}.norm1.bias", "decoder.layers.{index}.norm2.weight", "decoder.layers.{index}.norm2.bias", "decoder.layers.{index}.norm3.weight", "decoder.layers.{index}.norm3.bias", ] ENCODER_PARAM_FQNS = [ "encoder.layers.{index}.self_attn.in_proj_weight", "encoder.layers.{index}.self_attn.in_proj_bias", "encoder.layers.{index}.self_attn.out_proj.weight", "encoder.layers.{index}.self_attn.out_proj.bias", "encoder.layers.{index}.linear1.weight", "encoder.layers.{index}.linear1.bias", "encoder.layers.{index}.linear2.weight", "encoder.layers.{index}.linear2.bias", "encoder.layers.{index}.norm1.weight", "encoder.layers.{index}.norm1.bias", "encoder.layers.{index}.norm2.weight", "encoder.layers.{index}.norm2.bias", ] TOTAL_NUM_PREFETCH_FOR_PRE = 12 TOTAL_NUM_PREFETCH_FOR_POST = 11 ENCODER_BEGIN_INDEX_FOR_PRE = 6 ENCODER_BEGIN_INDEX_FOR_POST = 5 ENCODER_PREFETCH_NUM = 5 class TestBackwardPrefetch(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_backward_prefetch.py
_test_backward_prefetch
def _test_backward_prefetch(self, backward_prefetch: BackwardPrefetch):
    self._dist_train(backward_prefetch)
import sys from typing import List from unittest.mock import patch import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import BackwardPrefetch, FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_handle_fqns_from_root from torch.distributed.fsdp._flat_param import HandleTrainingState from torch.distributed.fsdp._runtime_utils import ( _get_handle_to_prefetch, _get_training_state, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN NUM_ITERS = 2 DECODER_PARAM_FQNS = [ "decoder.layers.{index}.self_attn.in_proj_weight", "decoder.layers.{index}.self_attn.in_proj_bias", "decoder.layers.{index}.self_attn.out_proj.weight", "decoder.layers.{index}.self_attn.out_proj.bias", "decoder.layers.{index}.multihead_attn.in_proj_weight", "decoder.layers.{index}.multihead_attn.in_proj_bias", "decoder.layers.{index}.multihead_attn.out_proj.weight", "decoder.layers.{index}.multihead_attn.out_proj.bias", "decoder.layers.{index}.linear1.weight", "decoder.layers.{index}.linear1.bias", "decoder.layers.{index}.linear2.weight", "decoder.layers.{index}.linear2.bias", "decoder.layers.{index}.norm1.weight", "decoder.layers.{index}.norm1.bias", "decoder.layers.{index}.norm2.weight", "decoder.layers.{index}.norm2.bias", "decoder.layers.{index}.norm3.weight", "decoder.layers.{index}.norm3.bias", ] ENCODER_PARAM_FQNS = [ "encoder.layers.{index}.self_attn.in_proj_weight", "encoder.layers.{index}.self_attn.in_proj_bias", "encoder.layers.{index}.self_attn.out_proj.weight", "encoder.layers.{index}.self_attn.out_proj.bias", "encoder.layers.{index}.linear1.weight", "encoder.layers.{index}.linear1.bias", "encoder.layers.{index}.linear2.weight", "encoder.layers.{index}.linear2.bias", "encoder.layers.{index}.norm1.weight", "encoder.layers.{index}.norm1.bias", "encoder.layers.{index}.norm2.weight", "encoder.layers.{index}.norm2.bias", ] TOTAL_NUM_PREFETCH_FOR_PRE = 12 TOTAL_NUM_PREFETCH_FOR_POST = 11 ENCODER_BEGIN_INDEX_FOR_PRE = 6 ENCODER_BEGIN_INDEX_FOR_POST = 5 ENCODER_PREFETCH_NUM = 5 class TestBackwardPrefetch(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/util_test.py
test_barrier
def test_barrier(self):
    store = MockStore()

    store_util.barrier(store, 3, key_prefix="test/store")

    self.assertListEqual(
        store.ops,
        [
            ("timeout",),
            ("set_timeout", datetime.timedelta(seconds=300)),
            ("add", "test/store/num_members", 1),
            ("set", "test/store/last_member", "<val_ignored>"),
            ("wait", ["test/store/last_member"]),
            ("set_timeout", datetime.timedelta(seconds=store._TEST_TIMEOUT)),
        ],
    )
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/util_test.py
test_barrier_timeout_rank_tracing
def test_barrier_timeout_rank_tracing(self):
    N = 3
    store = dist.HashStore()

    def run_barrier_for_rank(i: int):
        try:
            store_util.barrier(
                store,
                N,
                key_prefix="test/store",
                barrier_timeout=0.1,
                rank=i,
                rank_tracing_decoder=lambda x: f"Rank {x} host",
                trace_timeout=0.01,
            )
        except Exception as e:
            return str(e)
        return ""

    with ThreadPool(N - 1) as pool:
        outputs: List[str] = pool.map(run_barrier_for_rank, range(N - 1))

    self.assertTrue(any("missing_ranks=[Rank 2 host]" in msg for msg in outputs))
    self.assertTrue(
        any(
            "check rank 0 (Rank 0 host) for missing rank info" in msg
            for msg in outputs
        )
    )
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/util_test.py
run_barrier_for_rank
def run_barrier_for_rank(i: int):
    try:
        store_util.barrier(
            store,
            N,
            key_prefix="test/store",
            barrier_timeout=0.1,
            rank=i,
            rank_tracing_decoder=lambda x: f"Rank {x} host",
            trace_timeout=0.01,
        )
    except Exception as e:
        return str(e)
    return ""

with ThreadPool(N - 1) as pool:
    outputs: List[str] = pool.map(run_barrier_for_rank, range(N - 1))

self.assertTrue(any("missing_ranks=[Rank 2 host]" in msg for msg in outputs))
self.assertTrue(
    any(
        "check rank 0 (Rank 0 host) for missing rank info" in msg
        for msg in outputs
    )
)
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/elastic/utils/util_test.py
test_barrier_timeout_operations
def test_barrier_timeout_operations(self):
    import torch

    DistStoreError = torch._C._DistStoreError
    N = 3
    store = MockStore()

    # rank 0
    with mock.patch.object(store, "wait") as wait_mock:
        wait_mock.side_effect = [DistStoreError("test"), None, None]
        with self.assertRaises(DistStoreError):
            store_util.barrier(
                store,
                N,
                key_prefix="test/store",
                barrier_timeout=1,
                rank=0,
                rank_tracing_decoder=lambda x: f"Rank {x} host",
                trace_timeout=0.1,
            )

    self.assertListEqual(
        store.ops,
        [
            ("timeout",),
            ("set_timeout", datetime.timedelta(seconds=1)),
            ("add", "test/store/num_members", 1),
            ("set", "test/store/last_member", "<val_ignored>"),
            # wait for last member is mocked
            ("set", "test/store0/TRACE", "<val_ignored>"),
            # wait for each rank is mocked
            ("set", "test/store/TRACING_GATE", "<val_ignored>"),
        ],
    )

    # rank 1
    with mock.patch.object(store, "wait") as wait_mock:
        store.ops = []
        wait_mock.side_effect = [
            DistStoreError("test"),
            None,
        ]
        with self.assertRaises(DistStoreError):
            store_util.barrier(
                store,
                N,
                key_prefix="test/store",
                barrier_timeout=1,
                rank=1,
                rank_tracing_decoder=lambda x: f"Rank {x} host",
                trace_timeout=0.1,
            )

    self.assertListEqual(
        store.ops,
        [
            ("timeout",),
            ("set_timeout", datetime.timedelta(seconds=1)),
            ("add", "test/store/num_members", 1),
            ("set", "test/store/last_member", "<val_ignored>"),
            ("set", "test/store1/TRACE", "<val_ignored>"),
            # wait for gate is mocked
        ],
    )
import datetime from multiprocessing.pool import ThreadPool from typing import List from unittest import mock import torch.distributed as dist import torch.distributed.elastic.utils.store as store_util from torch.distributed.elastic.utils.logging import get_logger from torch.testing._internal.common_utils import run_tests, TestCase class StoreUtilTest(TestCase): import torch
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/flight_recorder/test_fr_analysis.py
test_match_one_event
def test_match_one_event(self):
    e1 = create_one_event(
        "all_reduce", ("0", "default"), [[4, 4]], [[4, 4]], "scheduled", 1
    )
    membership = {"0": {0, 1}}
    self.assertEqual(
        match_one_event(e1, e1, membership, "0"), MatchState.FULLY_MATCHED
    )

    e2 = create_one_event(
        "all_gather", ("0", "default"), [[4, 4]], [[4, 4]], "scheduled", 1
    )
    self.assertEqual(
        match_one_event(e1, e2, membership, "0"),
        MatchState.COLLECTIVE_TYPE_MISMATCH,
    )

    e3 = create_one_event(
        "all_to_all", ("0", "default"), [[4, 4]], [[4, 4]], "scheduled", 1
    )
    e4 = create_one_event(
        "all_to_all", ("0", "default"), [[4, 4]], [[4, 4]], "scheduled", 1
    )
    self.assertEqual(match_one_event(e3, e4, membership, "0"), MatchState.UNDECIDED)

    e5 = create_one_event(
        "all_reduce", ("0", "default"), [[5, 4]], [[4, 4]], "scheduled", 1, 1
    )
    self.assertEqual(
        match_one_event(e1, e5, membership, "0"), MatchState.SIZE_OR_SYNTAX_MISMATCH
    )

    e6 = create_one_event(
        "all_reduce", ("0", "default"), [[4, 4]], [[5, 4]], "scheduled", 1, 2
    )
    self.assertEqual(
        match_one_event(e1, e6, membership, "0"), MatchState.SIZE_OR_SYNTAX_MISMATCH
    )

    e7 = create_one_event(
        "all_reduce", ("0", "default"), [[4, 4]], [[5, 4]], "scheduled", 2
    )
    self.assertEqual(
        match_one_event(e7, e7, membership, "0"), MatchState.SIZE_OR_SYNTAX_MISMATCH
    )

    e9 = create_one_event(
        "all_reduce", ("0", "default"), [[4, 4]], [[4, 4]], "completed", 1
    )
    self.assertEqual(
        match_one_event(e1, e9, membership, "0"),
        MatchState.COLLECTIVE_STATE_MISMATCH,
    )

    e10 = create_one_event(
        "all_reduce",
        ("0", "default"),
        [[4, 4]],
        [[4, 4]],
        "completed",
        1,
        output_dtypes="float16",
    )
    self.assertEqual(
        match_one_event(e10, e9, membership, "0"),
        MatchState.COLLECTIVE_DTYPE_MISMATCH,
    )
import pathlib import sys REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent.parent from tools.flight_recorder.components.types import MatchState from tools.flight_recorder.components.utils import match_one_event from torch.testing._internal.common_utils import run_tests, TestCase class FlightRecorderEventTest(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
__init__
def __init__(self):
    super().__init__()
    self.lin = nn.Linear(10, 10)
def __init__(self) -> None:
    super().__init__()
    self.lin = nn.Linear(10, 10)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
__init__
def __init__(self):
    super().__init__()
    self.lin = nn.Linear(10, 10)
def __init__(self) -> None:
    super().__init__()
    self.lin = nn.Linear(10, 10)
import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions" class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_checkpoint_wrapper.py
ctx_manager
def ctx_manager():
    nonlocal count
    count += 1
    yield
import contextlib import unittest from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, CheckpointWrapper, offload_wrapper, OffloadWrapper, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_utils import run_tests, TestCase from torch.utils.checkpoint import checkpoint _SAVED_PREFIX = "_saved_" GRAD_FN_NEXT_FUNCTIONS = "next_functions"
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
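The ctx_manager record above is a plain generator-based context manager used to count how many times the checkpoint context is entered. A minimal, self-contained sketch of the same counting pattern, with illustrative names that are not taken from the test:

import contextlib

count = 0

@contextlib.contextmanager
def counting_ctx():
    # Bump the counter on every entry, then hand control back to the body.
    global count
    count += 1
    yield

with counting_ctx():
    pass
with counting_ctx():
    pass
assert count == 2  # the context was entered twice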
torch
test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
test_raises_warning_or_errors
def test_raises_warning_or_errors(self):
    device_mesh = init_device_mesh(self.device_type, (self.world_size,))
    model, optim = self._create_model(
        is_even_sharded_model=True, device_mesh=device_mesh
    )
    # initialize optim
    model(model.get_input()).sum().backward()
    optim.step()

    with self.assertRaisesRegex(
        RuntimeError, "DeviceMesh is not compatible with LOCAL_STATE_DICT."
    ):
        with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
            state_dict = model.state_dict()

    with self.assertRaisesRegex(
        RuntimeError, "DeviceMesh is not compatible with LOCAL_STATE_DICT."
    ):
        with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
            optim_state_dict = FSDP.optim_state_dict(model, optim)
import io from copy import deepcopy import torch import torch.nn as nn from torch.distributed._shard.sharded_tensor import ShardedTensor from torch.distributed._tensor import DTensor, Shard from torch.distributed.device_mesh import init_device_mesh from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.api import ( ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) from torch.testing._internal.distributed._tensor.common_dtensor import ( DTensorTestBase, skip_if_lt_x_gpu, with_comms, ) class TestFSDPWithDeviceMeshAndDTensor(DTensorTestBase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_exec_order.py
test_train_eval
def test_train_eval(self, sharding_strategy: ShardingStrategy):
    dist.set_debug_level(dist.DebugLevel.INFO)
    fsdp_model = Model.wrap(sharding_strategy, self.device)
    NUM_ITERS = 3
    NUM_EPOCHS = 2
    with warnings.catch_warnings(record=True) as w:  # records warnings to `w`
        for _ in range(NUM_EPOCHS):
            fsdp_model.train()
            for _ in range(NUM_ITERS):
                inp = fsdp_model.module.get_input(self.device)
                output = fsdp_model(*inp)
                loss = fsdp_model.module.get_loss(inp, output).to(self.device)
                fsdp_model.module.run_backward(loss)
            fsdp_model.eval()
            for _ in range(NUM_ITERS):
                inp = fsdp_model.module.get_input(self.device)
                output = fsdp_model(*inp)
                fsdp_model.module.get_loss(inp, output).to(self.device)
    # Check that the order validation warning was not issued (errors do not
    # need to be checked since they will be directly reported)
    warning_prefix = "Forward order differs"
    for warning in w:
        if str(warning.message).startswith(warning_prefix):
            raise AssertionError(
                f"Warning was incorrectly issued: {warning.message}"
            )
    # If we still validate the forward execution order in eval mode, then
    # an `AssertionError` will be raised above for both sharding strategies
def test_train_eval(self, sharding_strategy: ShardingStrategy):
    dist.set_debug_level(dist.DebugLevel.DETAIL)
    fsdp_model = Model.wrap(sharding_strategy, self.device)
    NUM_ITERS = 3
    NUM_EPOCHS = 2
    with warnings.catch_warnings(record=True) as w:  # records warnings to `w`
        for _ in range(NUM_EPOCHS):
            fsdp_model.train()
            for _ in range(NUM_ITERS):
                inp = fsdp_model.module.get_input(self.device)
                output = fsdp_model(*inp)
                loss = fsdp_model.module.get_loss(inp, output).to(self.device)
                fsdp_model.module.run_backward(loss)
            fsdp_model.eval()
            for _ in range(NUM_ITERS):
                inp = fsdp_model.module.get_input(self.device)
                output = fsdp_model(*inp)
                fsdp_model.module.get_loss(inp, output).to(self.device)
    # Check that the order validation warning was not issued (errors do not
    # need to be checked since they will be directly reported)
    warning_prefix = "Forward order differs"
    for warning in w:
        if str(warning.message).startswith(warning_prefix):
            raise AssertionError(
                f"Warning was incorrectly issued: {warning.message}"
            )
    # If we still validate the forward execution order in eval mode, then
    # an `AssertionError` will be raised above for both sharding strategies
import sys import warnings from contextlib import suppress import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPExecOrder(FSDPTest):
import sys import warnings from contextlib import nullcontext import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPExecOrder(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
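The only substantive change in test_train_eval above is the level passed to dist.set_debug_level, from INFO to DETAIL. A short sketch of the two common ways to set this level; the script name in the comment is illustrative:

import torch.distributed as dist

# Programmatic: valid levels are OFF, INFO, and DETAIL.
dist.set_debug_level(dist.DebugLevel.DETAIL)

# Or via the environment before the process starts, e.g.:
#   TORCH_DISTRIBUTED_DEBUG=DETAIL python train_script.py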
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
forward
def forward(self, frozen_input, learnable_input):
    return super().forward(frozen_input)
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class LinearUnusedInput(nn.Linear):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
__init__
def __init__(self, freeze: bool):
    super().__init__()
    self.layer0 = LinearUnusedInput(4, 4, device="cuda")
    self.layer1_frozen = LinearUnusedInput(4, 4, device="cuda")
    if freeze:
        for param in self.layer1_frozen.parameters():
            param.requires_grad = False
    self.layer2 = LinearUnusedInput(4, 4, device="cuda")
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class ModelUnusedInput(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
forward
def forward(self, frozen_input, learnable_input):
    return super().forward(frozen_input)
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class LinearUnusedInput(nn.Linear):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
test_backward_reshard_hooks
def test_backward_reshard_hooks(self):
    """
    Tests that the post-backward reshard happens even for flat parameters
    that do not require gradients.
    """
    self.run_subtests(
        {
            "sharding_strategy": [
                ShardingStrategy.FULL_SHARD,
                ShardingStrategy.SHARD_GRAD_OP,
                ShardingStrategy.NO_SHARD,
            ],
            "use_orig_params": [False, True],
            "inp_requires_grad": [False, True],
            "unfreeze_params": [False, True],
        },
        self._test_backward_reshard_hooks,
    )
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFSDPFineTune(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
_post_backward_reshard_with_count
def _post_backward_reshard_with_count(*args, **kwargs):
    nonlocal post_backward_reshard_count
    post_backward_reshard_count += 1
    return orig_post_backward_reshard(*args, **kwargs)
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm_hooks.py
custom_reduce_scatter
def custom_reduce_scatter(self, output, input, group=None):
    """
    This function is for illustrative purpose only.
    It is meant to implement a custom reduce-scatter
    of a flattened tensor to all processes in a group.
    Currently a no-op.
    """
    pass
def custom_reduce_scatter(self, output, input, group=None):
    """
    This function is for illustrative purpose only.
    It is meant to implement a custom reduce-scatter
    of a flattened tensor to all processes in a group.
    Currently a no-op.
    """
import sys from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.algorithms._comm_hooks import default_hooks from torch.distributed.distributed_c10d import _get_default_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.testing._internal.common_distributed import ( requires_nccl, requires_nccl_version, sandcastle_skip_if, skip_if_lt_x_gpu, skip_if_rocm, ) from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) BFLOAT16_AVAILABLE = ( torch.cuda.is_available() and torch.version.cuda is not None and int(torch.version.cuda.split(".")[0]) >= 11 ) class DummyHook:
import sys from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.algorithms._comm_hooks import default_hooks from torch.distributed.distributed_c10d import _get_default_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.testing._internal.common_distributed import ( requires_nccl, requires_nccl_version, skip_but_pass_in_sandcastle_if, skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, ) BFLOAT16_AVAILABLE = torch.cuda.is_available() and ( torch.version.cuda is not None or torch.version.hip is not None ) class DummyHook:
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_comm.py
_run_iter
def _run_iter(self, fsdp_model, batch, use_no_sync: bool):
    """Runs an iteration inside or outside the ``no_sync()`` context."""
    context = fsdp_model.no_sync() if use_no_sync else suppress()
    with context:
        output = fsdp_model(*batch)
        loss = fsdp_model.module.get_loss(batch, output)
        loss.backward()
def _run_iter(self, fsdp_model, batch, use_no_sync: bool):
    """Runs an iteration inside or outside the ``no_sync()`` context."""
    context = fsdp_model.no_sync() if use_no_sync else nullcontext()
    with context:
        output = fsdp_model(*batch)
        loss = fsdp_model.module.get_loss(batch, output)
        loss.backward()
import sys from contextlib import suppress from enum import auto, Enum from typing import Optional from unittest.mock import patch import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestCommunication(FSDPTest):
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestCommunication(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
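The _run_iter change above swaps contextlib.suppress() for contextlib.nullcontext() as the do-nothing branch when no_sync() is not requested; nullcontext states that intent directly rather than suppressing an empty exception list. A standalone sketch of the conditional-context pattern, with made-up model and flag names:

from contextlib import nullcontext

import torch
import torch.nn as nn

model = nn.Linear(4, 4)  # stand-in for a DDP/FSDP-wrapped module
use_no_sync = False      # illustrative flag

# Use the wrapper's no_sync() when asked for and available; otherwise a no-op context.
context = model.no_sync() if use_no_sync and hasattr(model, "no_sync") else nullcontext()
with context:
    loss = model(torch.randn(2, 4)).sum()
    loss.backward()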
torch
test/distributed/fsdp/test_fsdp_comm.py
test_unshard_async
def test_unshard_async(self, use_orig_params: bool):
    class ReduceModule(nn.Module):
        def __init__(self, dim: int, group: dist.ProcessGroup):
            super().__init__()
            self.group = group
            self.weight = nn.Parameter(torch.randn(dim, dim))

        def forward(self, x: torch.Tensor):
            y = F.relu(x @ self.weight)
            # NOTE: This all-reduce is not differentiable and is included
            # to exercise the overlap.
            work = dist.all_reduce(y, group=self.group, async_op=True)
            return y, work

    class MLPs(nn.Module):
        def __init__(self, dim: int):
            super().__init__()
            self.mlp1 = MLP(dim)
            self.mlp2 = MLP(dim)
            self.mlp3 = MLP(dim)

        def forward(self, ys: List[torch.Tensor], works: List[dist.Work]):
            (y1, y2, y3), (work1, work2, work3) = ys, works
            work1.wait()
            z1 = self.mlp1(y1)
            work2.wait()
            z2 = self.mlp2(y2)
            work3.wait()
            z3 = self.mlp3(y3)
            return z1 + z2 + z3

    class ReduceModel(nn.Module):
        def __init__(self, dim: int, group: dist.ProcessGroup):
            super().__init__()
            self.reduce_module1 = ReduceModule(dim, group)
            self.reduce_module2 = ReduceModule(dim, group)
            self.reduce_module3 = ReduceModule(dim, group)
            self.mlps = MLPs(dim)

        def forward(self, x: torch.Tensor):
            y1, work1 = self.reduce_module1(x)
            if isinstance(self.mlps.mlp1, FSDP):
                self.mlps.mlp1._unshard(async_op=True)
            y2, work2 = self.reduce_module2(x)
            if isinstance(self.mlps.mlp2, FSDP):
                self.mlps.mlp2._unshard(async_op=True)
            y3, work3 = self.reduce_module3(x)
            if isinstance(self.mlps.mlp3, FSDP):
                self.mlps.mlp3._unshard(async_op=True)
            return self.mlps([y1, y2, y3], [work1, work2, work3])

    group = self.process_group
    batch_size, dim = 2, 8
    torch.manual_seed(42)
    ref_model = DDP(ReduceModel(dim, group).cuda(), device_ids=[self.rank])
    ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
    torch.manual_seed(42)
    model = ReduceModel(dim, group)
    model.mlps = FSDP(
        model.mlps,
        sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
        auto_wrap_policy=ModuleWrapPolicy((MLP,)),
        device_id=self.rank,
        use_orig_params=use_orig_params,
    )
    model.mlps.check_is_root()
    mlp_params = set(model.mlps.parameters())
    mlp_param_names = {n for n, p in model.named_parameters() if p in mlp_params}
    DDP._set_params_and_buffers_to_ignore_for_model(model, mlp_param_names)
    model = DDP(model.cuda(), device_ids=[self.rank])
    optim = torch.optim.Adam(model.parameters(), lr=1e-2)
    torch.manual_seed(42 + self.rank + 1)
    inp = torch.randn((batch_size, dim), device="cuda")

    for _ in range(10):
        losses: List[torch.Tensor] = []
        for _model, _optim in ((ref_model, ref_optim), (model, optim)):
            losses.append(_model(inp).sum())
            losses[-1].backward()
            _optim.step()
            _optim.zero_grad()
        self.assertEqual(losses[0], losses[1])
        model.module.mlps._wait_unshard_streams_on_current_stream()
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestExplicitUnshard(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
__init__
def __init__(self, dim: int, group: dist.ProcessGroup):
    super().__init__()
    self.group = group
    self.weight = nn.Parameter(torch.randn(dim, dim))
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
forward
def forward(self, x: torch.Tensor):
    y = F.relu(x @ self.weight)
    # NOTE: This all-reduce is not differentiable and is included
    # to exercise the overlap.
    work = dist.all_reduce(y, group=self.group, async_op=True)
    return y, work
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
__init__
def __init__(self, dim: int, group: dist.ProcessGroup):
    super().__init__()
    self.group = group
    self.weight = nn.Parameter(torch.randn(dim, dim))
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
forward
def forward(self, x: torch.Tensor):
    y = F.relu(x @ self.weight)
    # NOTE: This all-reduce is not differentiable and is included
    # to exercise the overlap.
    work = dist.all_reduce(y, group=self.group, async_op=True)
    return y, work
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
__init__
def __init__(self, dim: int, group: dist.ProcessGroup):
    super().__init__()
    self.group = group
    self.weight = nn.Parameter(torch.randn(dim, dim))
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_comm.py
forward
instantiate_parametrized_tests(TestCommunication)

if __name__ == "__main__":
    run_tests()
def forward(self, x: torch.Tensor):
    y = F.relu(x @ self.weight)
    # NOTE: This all-reduce is not differentiable and is included
    # to exercise the overlap.
    work = dist.all_reduce(y, group=self.group, async_op=True)
    return y, work
import sys from contextlib import nullcontext from enum import auto, Enum from typing import List, Optional from unittest.mock import patch import torch import torch.nn as nn import torch.nn.functional as F from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class ReduceModule(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
_assert_post_backward_requires_grad
def _assert_post_backward_requires_grad(seq):
    if step_idx == num_steps - 1 and unfreeze_params:
        self.assertTrue(
            all(p.requires_grad for p in seq.parameters()),
            msg="Expected all parameters to require grad but some did not!",
        )
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
_assert_post_backward_reshard_count
def _assert_post_backward_reshard_count(step_idx, num_steps):
    if step_idx < num_steps - 1 or not unfreeze_params:
        # If the input does not require gradient, then the 0th
        # frozen linear gets resharded in the catch-all reshard
        # since we cannot register an autograd hook on it
        expected_post_backward_reshard_count = (
            self.NUM_LINEARS if inp_requires_grad else self.NUM_LINEARS - 1
        )
    else:
        # This follows the normal post-backward hook path
        expected_post_backward_reshard_count = self.NUM_LINEARS
    self.assertEqual(
        post_backward_reshard_count, expected_post_backward_reshard_count
    )

with mock.patch(
    "torch.distributed.fsdp._runtime_utils._post_backward_reshard",
    _post_backward_reshard_with_count,
):
    num_steps = 3
    # interleave a `no_grad` step to validate post-backward hooks are not registered in that context
    # and that `requires_grad` is reset appropriately when unfreezing
    nograd_step_idx = 1
    for step_idx in range(num_steps):
        if unfreeze_params and step_idx == num_steps - 1:
            # Unfreeze the parameters on the last step to emulate some
            # kinds of fine-tuning
            self._set_seq_module_requires_grad(seq, True)
        inp = torch.randn(
            (8, 5), device="cuda", requires_grad=inp_requires_grad
        )
        if step_idx == nograd_step_idx:
            with torch.no_grad():
                output = seq(inp)
        else:
            output = seq(inp)
        if step_idx != nograd_step_idx:
            output.sum().backward()
            _assert_post_backward_requires_grad(seq)
            _assert_post_backward_reshard_count(step_idx, num_steps)
            post_backward_reshard_count = 0
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
forward
def forward(self, frozen_input, learnable_input):
    return super().forward(frozen_input)
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class LinearUnusedInput(nn.Linear):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
test_hooks_multi_traversal
def test_hooks_multi_traversal(self):
    """
    Tests that the hooks do reshard / unshard correctly in the case of same
    parameters being used multiple times during forward pass.
    """
    self.run_subtests(
        {
            "sharding_strategy": [
                ShardingStrategy.FULL_SHARD,
                ShardingStrategy.SHARD_GRAD_OP,
                ShardingStrategy.NO_SHARD,
            ],
            "use_orig_params": [False, True],
            "inp_requires_grad": [False, True],
            "forward_prefetch": [False, True],
        },
        self._test_hooks_multi_traversal,
    )
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFSDPFineTune(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
test_parity_with_ddp
def test_parity_with_ddp(self):
    """
    Tests parity with DDP when mixing flat parameters that require and do
    not require gradients.
    """
    self.run_subtests(
        {
            "sharding_strategy": [
                ShardingStrategy.FULL_SHARD,
                ShardingStrategy.SHARD_GRAD_OP,
                ShardingStrategy.NO_SHARD,
            ],
            "use_orig_params": [False, True],
        },
        self._test_parity_with_ddp,
    )
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFSDPFineTune(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_fine_tune.py
test_parity_with_non_frozen_fsdp
def test_parity_with_non_frozen_fsdp(self):
    """
    For frozen modules with unused input, reshard could happen without unshard
    Verify numerical parity between `_post_backward_reshard_only_hook` and
    `_post_backward_hook` path
    """
    self.run_subtests(
        {
            "sharding_strategy": [
                ShardingStrategy.FULL_SHARD,
                ShardingStrategy.SHARD_GRAD_OP,
            ],
            "use_orig_params": [True, False],
            "offload_params": [True, False],
            "mixed_precision": [
                MixedPrecision(),
                MixedPrecision(
                    param_dtype=torch.float16,
                    buffer_dtype=torch.float16,
                    reduce_dtype=torch.float16,
                ),
            ],
            "backward_prefetch": [
                BackwardPrefetch.BACKWARD_PRE,
                BackwardPrefetch.BACKWARD_POST,
            ],
        },
        self._test_parity_with_non_frozen_fsdp,
    )
import copy import sys from unittest import mock import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import BackwardPrefetch, CPUOffload, MixedPrecision from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFSDPFineTune(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_flatten_params.py
_get_default_config
def _get_default_config(self):
    return (HandleShardingStrategy.FULL_SHARD, False, None, None, False)
def _get_default_config(self):
    return {
        "device": torch.device("cuda"),
        "sharding_strategy": HandleShardingStrategy.FULL_SHARD,
        "offload_params": False,
        "mp_param_dtype": None,
        "mp_reduce_dtype": None,
        "keep_low_precision_grads": False,
        "process_group": self.process_group,
        "use_orig_params": False,
        "fsdp_extension": None,
    }
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_flatten_params.py
_get_transformer
def _get_transformer(self, seed=0):
    torch.manual_seed(seed)  # keep everything deterministic
    module = torch.nn.Transformer(
        d_model=32,
        num_encoder_layers=2,
        num_decoder_layers=2,
        dim_feedforward=128,
        dropout=0.1,
    )
    module.register_buffer("dummy_buffer", torch.tensor(1.0))

    def get_input(device, dtype):
        torch.manual_seed(1)  # keep everything deterministic
        src = torch.rand(20, 8, 32).to(device=device, dtype=dtype)  # T x B x C
        tgt = torch.rand(10, 8, 32).to(device=device, dtype=dtype)  # T x B x C
        return (src, tgt)

    module.get_input = get_input
    return module
def _get_transformer(self, seed=0):
    torch.manual_seed(seed)  # keep everything deterministic
    module = torch.nn.Transformer(
        d_model=32,
        num_encoder_layers=2,
        num_decoder_layers=2,
        dim_feedforward=128,
        dropout=0.1,
    )
    module.dummy_buffer = nn.Buffer(torch.tensor(1.0))

    def get_input(device, dtype):
        torch.manual_seed(1)  # keep everything deterministic
        src = torch.rand(20, 8, 32).to(device=device, dtype=dtype)  # T x B x C
        tgt = torch.rand(10, 8, 32).to(device=device, dtype=dtype)  # T x B x C
        return (src, tgt)

    module.get_input = get_input
    return module
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
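The _get_transformer change above replaces module.register_buffer(...) with assignment of nn.Buffer, an attribute-style way to declare buffers available in newer PyTorch releases (the updated test assumes it exists). A minimal sketch comparing the two spellings:

import torch
import torch.nn as nn

m1 = nn.Linear(2, 2)
m1.register_buffer("dummy_buffer", torch.tensor(1.0))  # classic API

m2 = nn.Linear(2, 2)
m2.dummy_buffer = nn.Buffer(torch.tensor(1.0))  # attribute assignment via nn.Buffer

# Both register a non-parameter tensor that follows the module across devices
# and is included in state_dict.
assert "dummy_buffer" in dict(m1.named_buffers())
assert "dummy_buffer" in dict(m2.named_buffers())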
torch
test/distributed/fsdp/test_fsdp_flatten_params.py
test_flat_param_shard_metadata_unaligned
""" Tests that ``FlatParameter`` shard metadata are computed as expected. """ module = torch.nn.Sequential( torch.nn.Linear(10, 10, bias=False), torch.nn.ReLU(), torch.nn.Linear(10, 10, bias=False), torch.nn.ReLU(), torch.nn.Linear(10, 10, bias=False), torch.nn.ReLU(), ) params_to_flatten = list(module.parameters()) flat_param_handle = FlatParamHandle( params_to_flatten, module, torch.device("cuda"), *self._get_default_config(), self.process_group, False, )
def test_flat_param_shard_metadata_unaligned(self):
    """
    Tests that ``FlatParameter`` shard metadata are computed as expected
    without any explicit alignment padding.
    """
    module = torch.nn.Sequential(
        torch.nn.Linear(10, 10, bias=False),
        nn.ReLU(),
        torch.nn.Linear(10, 10, bias=False),
        nn.ReLU(),
        torch.nn.Linear(10, 10, bias=False),
        nn.ReLU(),
    )
    params_to_flatten = list(module.parameters())
    handle = FlatParamHandle(
        params_to_flatten,
        module,
        **self._get_default_config(),
    )

    self._test_flat_param_shard_metadata(
        handle,
        start=0,
        end=0,
        expected=FlatParamShardMetadata(
            param_names=["0.weight"],
            param_shapes=[(10, 10)],
            param_numels=[100],
            param_offsets=[(0, 0)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=0,
        end=50,
        expected=FlatParamShardMetadata(
            param_names=["0.weight"],
            param_shapes=[(10, 10)],
            param_numels=[100],
            param_offsets=[(0, 50)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=0,
        end=99,
        expected=FlatParamShardMetadata(
            param_names=["0.weight"],
            param_shapes=[(10, 10)],
            param_numels=[100],
            param_offsets=[(0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=50,
        end=149,
        expected=FlatParamShardMetadata(
            param_names=["0.weight", "2.weight"],
            param_shapes=[(10, 10), (10, 10)],
            param_numels=[100, 100],
            param_offsets=[(50, 99), (0, 49)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=50,
        end=199,
        expected=FlatParamShardMetadata(
            param_names=["0.weight", "2.weight"],
            param_shapes=[(10, 10), (10, 10)],
            param_numels=[100, 100],
            param_offsets=[(50, 99), (0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=99,
        end=199,
        expected=FlatParamShardMetadata(
            param_names=["0.weight", "2.weight"],
            param_shapes=[(10, 10), (10, 10)],
            param_numels=[100, 100],
            param_offsets=[(99, 99), (0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=100,
        end=199,
        expected=FlatParamShardMetadata(
            param_names=["2.weight"],
            param_shapes=[(10, 10)],
            param_numels=[100],
            param_offsets=[(0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=100,
        end=299,
        expected=FlatParamShardMetadata(
            param_names=["2.weight", "4.weight"],
            param_shapes=[(10, 10), (10, 10)],
            param_numels=[100, 100],
            param_offsets=[(0, 99), (0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=100,
        end=1000,
        expected=FlatParamShardMetadata(
            param_names=["2.weight", "4.weight"],
            param_shapes=[(10, 10), (10, 10)],
            param_numels=[100, 100],
            param_offsets=[(0, 99), (0, 99)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        start=299,
        end=299,
        expected=FlatParamShardMetadata(
            param_names=["4.weight"],
            param_shapes=[(10, 10)],
            param_numels=[100],
            param_offsets=[(99, 99)],
        ),
    )
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_flatten_params.py
test_flat_param_shard_metadata_aligned_full_precision
def test_flat_param_shard_metadata_aligned_full_precision(self):
    """
    Tests that ``FlatParameter`` shard metadata are computed as expected
    with alignment padding and parameter full precision.
    """
    module = torch.nn.Sequential(
        torch.nn.Linear(3, 7, bias=False),  # 0.weight
        torch.nn.Linear(7, 5, bias=False),  # 1.weight
        torch.nn.Linear(5, 5, bias=False),  # 2.weight
    )
    params_to_flatten = list(module.parameters())
    handle_kwargs = self._get_default_config()
    handle_kwargs["use_orig_params"] = True
    handle = FlatParamHandle(params_to_flatten, module, **handle_kwargs)
    # For 32-bit full precision, FSDP pads up to 3 numel after each
    # original parameter to achieve 0 mod 4 numel (i.e. 0 mod 16 bytes).
    # Thus, the unsharded `FlatParameter` layout looks like:
    #   21 + (3) + 35 + (1) + 25
    # where (x) means x numel of padding. This gives a total of 85 numel.

    # The `FlatParamShardMetadata` do not include alignment padding but do
    # account for them
    self._test_flat_param_shard_metadata(
        handle,
        # Emulate rank 0 of 2 ranks
        start=0,
        end=42,
        expected=FlatParamShardMetadata(
            param_names=["0.weight", "1.weight"],
            param_shapes=[(7, 3), (5, 7)],
            param_numels=[21, 35],
            # 21 + (3) + 19 = 43
            param_offsets=[(0, 20), (0, 18)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        # Emulate rank 1 of 2 ranks
        start=43,
        end=85,
        expected=FlatParamShardMetadata(
            param_names=["1.weight", "2.weight"],
            param_shapes=[(5, 7), (5, 5)],
            param_numels=[35, 25],
            # 16 + (1) + 25 = 42
            param_offsets=[(19, 34), (0, 24)],
        ),
    )
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_flatten_params.py
test_flat_param_shard_metadata_aligned_mixed_precision
if __name__ == "__main__":
    run_tests()
def test_flat_param_shard_metadata_aligned_mixed_precision(self):
    """
    Tests that ``FlatParameter`` shard metadata are computed as expected
    with alignment padding and parameter mixed precision.
    """
    module = torch.nn.Sequential(
        torch.nn.Linear(2, 5, bias=False),  # 0.weight
        torch.nn.Linear(5, 5, bias=False),  # 1.weight
        torch.nn.Linear(5, 3, bias=False),  # 2.weight
    )
    params_to_flatten = list(module.parameters())
    handle_kwargs = self._get_default_config()
    handle_kwargs["use_orig_params"] = True
    handle_kwargs["mp_param_dtype"] = torch.float16
    handle = FlatParamHandle(params_to_flatten, module, **handle_kwargs)
    # For 16-bit mixed precision, FSDP pads up to 7 numel after each
    # original parameter to achieve 0 mod 8 numel (i.e. 0 mod 16 bytes).
    # Thus, the unsharded `FlatParameter` layout looks like:
    #   10 + (6) + 25 + (7) + 15
    # where (x) means x numel of padding. This gives a total of 63 numel.

    # The `FlatParamShardMetadata` do not include alignment padding but do
    # account for them
    self._test_flat_param_shard_metadata(
        handle,
        # Emulate rank 0 of 2 ranks
        start=0,
        end=31,
        expected=FlatParamShardMetadata(
            param_names=["0.weight", "1.weight"],
            param_shapes=[(5, 2), (5, 5)],
            param_numels=[10, 25],
            # 10 + (6) + 16 = 32
            param_offsets=[(0, 9), (0, 15)],
        ),
    )
    self._test_flat_param_shard_metadata(
        handle,
        # Emulate rank 1 of 2 ranks
        start=32,
        end=63,
        expected=FlatParamShardMetadata(
            param_names=["1.weight", "2.weight"],
            param_shapes=[(5, 5), (3, 5)],
            param_numels=[25, 15],
            # 9 + (7) + 15 = 31
            param_offsets=[(16, 24), (0, 14)],
        ),
    )
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import ( FlatParamHandle, FlatParamShardMetadata, HandleShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN class TestFlattenParams(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_freezing_weights.py
__init__
def __init__(self, with_fsdp, freeze_after_wrap_fsdp): super().__init__() self.trunk = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(), ) self.head = nn.Linear(64, 10) if with_fsdp and freeze_after_wrap_fsdp: self.fsdp_wrap()
def __init__( self, with_fsdp, freeze_after_wrap_fsdp, disable_autograd, fsdp_kwargs, ): super().__init__() self.trunk = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(), ) self.device = torch.cuda.current_device() self.head = nn.Linear(64, 10) if with_fsdp and freeze_after_wrap_fsdp: self.fsdp_wrap(fsdp_kwargs) self.autograd_ctx = ( torch.no_grad if disable_autograd else contextlib.nullcontext )
import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
import contextlib import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_freezing_weights.py
fsdp_wrap
def fsdp_wrap(self): self.trunk = FSDP(self.trunk) self.head = FSDP(self.head)
def fsdp_wrap(self, fsdp_kwargs): self.trunk = FSDP(self.trunk, **fsdp_kwargs) self.head = FSDP(self.head, **fsdp_kwargs)
import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
import contextlib import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_freezing_weights.py
forward
def forward(self, x): return self.head(self.trunk(x))
def forward(self, x): with self.autograd_ctx(): x = self.trunk(x) return self.head(x)
import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
import contextlib import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class Model(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_hybrid_shard.py
test_fsdp_hybrid_shard_basic_setup
def test_fsdp_hybrid_shard_basic_setup(self): """ Tests basic functionality of HYBRID_SHARD and _HYBRID_SHARD_ZERO2: 1. Inter and intra-node process groups are correctly setup 2. Process groups are the same across FSDP wrapped instances 3. reduce_scatter and allreduce called the expected no. of times """ self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "sharding_strategy_mode": [ ShardingStrategyMode.ALL_HYBRID_SHARD, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD, ], }, self._test_fsdp_hybrid_shard_basic_setup, )
def test_fsdp_hybrid_shard_basic_setup(self): """ Tests basic functionality of HYBRID_SHARD and _HYBRID_SHARD_ZERO2: 1. Inter and intra-node process groups are correctly setup 2. Process groups are the same across FSDP wrapped instances 3. reduce_scatter and allreduce called the expected no. of times """ self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "sharding_strategy_mode": [ ShardingStrategyMode.ALL_HYBRID_SHARD, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD, ], "use_orig_params": [False, True], "use_device_mesh": [False, True], }, self._test_fsdp_hybrid_shard_basic_setup, )
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPHybridShard(FSDPTest):
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial from typing import List, Optional, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.device_mesh import init_device_mesh from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ShardingStrategy, StateDictType, ) from torch.distributed.fsdp._init_utils import ( _init_intra_and_inter_node_groups, HYBRID_SHARDING_STRATEGIES, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPHybridShard(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_hybrid_shard.py
patched_collective
def patched_collective(orig_collective, counter, *args, **kwargs): counter[orig_collective] += 1 return orig_collective(*args, **kwargs) cntr = Counter() patched_allreduce = partial(patched_collective, orig_ar, cntr) patched_reduce_scatter = partial(patched_collective, orig_rs, cntr) with patch_allreduce(patched_allreduce), patch_reduce_scatter( patched_reduce_scatter ): inp = fsdp_model.get_input(device=torch.cuda.current_device()) out = fsdp_model(inp[0], inp[1]) loss = fsdp_model.get_loss(inp, out) loss.backward() if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: num_flat_params = len(list(traversal_utils._get_fsdp_handles(fsdp_model))) self.assertEqual(num_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: num_hsdp_flat_params = len( list(traversal_utils._get_fsdp_handles(fsdp_model.transformer)) ) num_flat_params = len(list(traversal_utils._get_fsdp_handles(fsdp_model))) self.assertEqual(num_hsdp_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs])
def patched_collective(orig_collective, counter, *args, **kwargs): counter[orig_collective] += 1 return orig_collective(*args, **kwargs) cntr = Counter() patched_allreduce = partial(patched_collective, orig_ar, cntr) patched_reduce_scatter = partial(patched_collective, orig_rs, cntr) with patch_allreduce(patched_allreduce), patch_reduce_scatter( patched_reduce_scatter ): inp = hsdp_model.get_input(device=torch.cuda.current_device()) out = hsdp_model(inp[0], inp[1]) loss = hsdp_model.get_loss(inp, out) loss.backward() if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model))) self.assertEqual(num_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: num_hsdp_flat_params = len( list(traversal_utils._get_fsdp_handles(hsdp_model.transformer)) ) num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model))) self.assertEqual(num_hsdp_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs])
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, )
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial from typing import List, Optional, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.device_mesh import init_device_mesh from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ShardingStrategy, StateDictType, ) from torch.distributed.fsdp._init_utils import ( _init_intra_and_inter_node_groups, HYBRID_SHARDING_STRATEGIES, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_hybrid_shard.py
test_fsdp_hybrid_shard_parity
def _init_hsdp_model( self, hsdp_sharding_strategy: ShardingStrategy, sharding_strategy_mode: str, ): if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) fsdp_kwargs = { "auto_wrap_policy": auto_wrap_policy, "device_id": torch.cuda.current_device(), "sharding_strategy": hsdp_sharding_strategy, } fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE, fsdp_kwargs, ) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.NO_FSDP, CUDAInitMode.CUDA_BEFORE, {}, ) transformer_auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) # Use the HSDP strategy for the transformer module model.transformer = FSDP( model.transformer, auto_wrap_policy=transformer_auto_wrap_policy, device_id=torch.cuda.current_device(), sharding_strategy=hsdp_sharding_strategy, ) # Use `FULL_SHARD` for the embedding and output projection fsdp_model = FSDP( model, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, ) return fsdp_model instantiate_parametrized_tests(TestFSDPHybridShard) if __name__ == "__main__": run_tests()
def test_fsdp_hybrid_shard_parity(self): self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "use_orig_params": [False, True], }, self._test_fsdp_hybrid_shard_parity, )
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial from typing import List, Optional, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.device_mesh import init_device_mesh from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ShardingStrategy, StateDictType, ) from torch.distributed.fsdp._init_utils import ( _init_intra_and_inter_node_groups, HYBRID_SHARDING_STRATEGIES, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPHybridShard(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_freezing_weights.py
_create_model
def _create_model(self, with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp): if with_nested_trunk: model = NestedTrunkModel(with_fsdp, freeze_after_wrap_fsdp) else: model = Model(with_fsdp, freeze_after_wrap_fsdp) return model
def _create_model( self, with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp, disable_autograd, fsdp_kwargs, ): if with_nested_trunk: model = NestedTrunkModel( with_fsdp, freeze_after_wrap_fsdp, disable_autograd, fsdp_kwargs ) else: model = Model( with_fsdp, freeze_after_wrap_fsdp, disable_autograd, fsdp_kwargs ) return model
import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFreezingWeights(FSDPTest):
import contextlib import sys from enum import Enum import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest, get_full_params from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFreezingWeights(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_grad_acc.py
permute_tensor
def permute_tensor(x: torch.Tensor): return x.view(-1)[torch.randperm(x.numel())].view_as(x) batch: Tuple[torch.Tensor, ...] = fsdp_model.module.get_input(device) batches: List[Tuple[torch.Tensor, ...]] = [batch] num_iters_to_acc = sum(config.num_iters for config in configs) for _ in range(num_iters_to_acc - 1): batches.append(tuple(permute_tensor(t) for t in batch)) for (batch1, batch2) in itertools.combinations(batches, r=2): for t1, t2 in zip(batch1, batch2): assert not torch.all( t1 == t2 ), "Check the test to make sure that batches are distinct" # Concatenate the batches along the given batch dimension concat_batch: Tuple[torch.Tensor, ...] = tuple( torch.cat(ts, dim=batch_dim) for ts in zip(*batches) ) # Establish reference gradients using the concatenated batch fsdp_model.zero_grad() output = fsdp_model(*concat_batch) ref_loss = fsdp_model.module.get_loss(concat_batch, output) ref_loss.backward() ref_grads = [ p.grad.detach().clone() for p in fsdp_model.parameters() if p.grad is not None ] # Compute and accumulate the gradients fsdp_model.zero_grad() losses = [] batch_idx = 0 for config in configs: sync_context = ( fsdp_model.no_sync() if config.use_no_sync else contextlib.suppress() ) with sync_context: for _ in range(config.num_iters): if batch_idx == num_iters_to_acc - 1: break # always sync on the last iteration batch = batches[batch_idx] batch_idx += 1 output = fsdp_model(*batch) loss = fsdp_model.module.get_loss(batch, output) loss.backward() losses.append(loss) output = fsdp_model(*batches[-1]) loss = fsdp_model.module.get_loss(batches[-1], output) loss.backward() losses.append(loss) acc_loss = sum(losses) acc_grads = [ p.grad.detach().clone() for p in fsdp_model.parameters() if p.grad is not None ] # Compare the losses and gradients torch.testing.assert_close(ref_loss, acc_loss) self.assertEqual(len(ref_grads), len(acc_grads)) for ref_grad, acc_grad in zip(ref_grads, acc_grads): self.assertEqual(ref_grad.device, acc_grad.device) self.assertEqual(ref_grad.size(), acc_grad.size()) self.assertEqual(ref_grad.dtype, acc_grad.dtype) torch.testing.assert_close(ref_grad, acc_grad) # Check that the optimizer step does not error optim.step()
def permute_tensor(x: torch.Tensor): return x.view(-1)[torch.randperm(x.numel())].view_as(x) batch: Tuple[torch.Tensor, ...] = fsdp_model.module.get_input(device) batches: List[Tuple[torch.Tensor, ...]] = [batch] num_iters_to_acc = sum(config.num_iters for config in configs) for _ in range(num_iters_to_acc - 1): batches.append(tuple(permute_tensor(t) for t in batch)) for batch1, batch2 in itertools.combinations(batches, r=2): for t1, t2 in zip(batch1, batch2): assert not torch.all( t1 == t2 ), "Check the test to make sure that batches are distinct" # Concatenate the batches along the given batch dimension concat_batch: Tuple[torch.Tensor, ...] = tuple( torch.cat(ts, dim=batch_dim) for ts in zip(*batches) ) # Establish reference gradients using the concatenated batch fsdp_model.zero_grad() output = fsdp_model(*concat_batch) ref_loss = fsdp_model.module.get_loss(concat_batch, output) ref_loss.backward() ref_grads = [ p.grad.detach().clone() for p in fsdp_model.parameters() if p.grad is not None ] # Compute and accumulate the gradients fsdp_model.zero_grad() losses = [] batch_idx = 0 for config in configs: sync_context = ( fsdp_model.no_sync() if config.use_no_sync else contextlib.nullcontext() ) with sync_context: for _ in range(config.num_iters): if batch_idx == num_iters_to_acc - 1: break # always sync on the last iteration batch = batches[batch_idx] batch_idx += 1 output = fsdp_model(*batch) loss = fsdp_model.module.get_loss(batch, output) loss.backward() losses.append(loss) output = fsdp_model(*batches[-1]) loss = fsdp_model.module.get_loss(batches[-1], output) loss.backward() losses.append(loss) acc_loss = sum(losses) acc_grads = [ p.grad.detach().clone() for p in fsdp_model.parameters() if p.grad is not None ] # Compare the losses and gradients torch.testing.assert_close(ref_loss, acc_loss) self.assertEqual(len(ref_grads), len(acc_grads)) for ref_grad, acc_grad in zip(ref_grads, acc_grads): self.assertEqual(ref_grad.device, acc_grad.device) self.assertEqual(ref_grad.size(), acc_grad.size()) self.assertEqual(ref_grad.dtype, acc_grad.dtype) torch.testing.assert_close(ref_grad, acc_grad) # Check that the optimizer step does not error optim.step()
import contextlib import itertools import sys from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple import torch from torch import distributed as dist from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, ShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, )
import contextlib import itertools import sys from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple import torch from torch import distributed as dist from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, ShardingStrategy, ) from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, skipIfRocm, TEST_WITH_DEV_DBG_ASAN, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_hybrid_shard.py
patch_reduce_scatter
def patch_reduce_scatter(new_reduce_scatter): """ Patches dist.reduce_scatter_tensor with a new reduce_scatter_tensor and restores upon exiting. """ orig_reduce_scatter = dist.reduce_scatter_tensor dist.reduce_scatter_tensor = new_reduce_scatter try: yield finally: dist.reduce_scatter_tensor = orig_reduce_scatter class MyModel(nn.Module): def __init__(self): super().__init__() self.lin1 = nn.Linear(10, 10) self.lin2 = nn.Linear(10, 10) self.lin3 = nn.Linear(10, 10) class ShardingStrategyMode(Enum): ALL_HYBRID_SHARD = auto() MIXED_HYBRID_FULL_SHARD = auto() class TestFSDPHybridShard(FSDPTest): @property def world_size(self): return max(torch.cuda.device_count(), 2) @property def process_group(self): return dist.distributed_c10d._get_default_group() @skip_if_lt_x_gpu(2) def test_raises_manual_wrap_hybrid_shard_when_none_policy(self): model = MyModel().cuda() err_ctx = self.assertRaisesRegex( ValueError, "requires explicit specification of process group" ) with err_ctx: model = FSDP(model, sharding_strategy=ShardingStrategy.HYBRID_SHARD) with err_ctx: model = FSDP(model, sharding_strategy=ShardingStrategy._HYBRID_SHARD_ZERO2) @skip_if_lt_x_gpu(2) def test_hybrid_shard_pg_mismatch_raises(self): model = MyModel().cuda() intra_pg = self.process_group inter_pg = dist.new_group(ranks=[self.rank]) # Mismatched process groups for intra-node model.lin1 = FSDP( model.lin1, process_group=(intra_pg, inter_pg), sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) model = FSDP( model, process_group=(dist.new_group(), dist.new_group()), sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) # Errors during _lazy_init inp = torch.randn(4, 10) with self.assertRaisesRegex( ValueError, "intra-node process groups do not match" ): model(inp) # Mismatched process groups for inter-node model = MyModel().cuda() model.lin1 = FSDP( model.lin1, process_group=(intra_pg, inter_pg), sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) model = FSDP( model, process_group=(intra_pg, dist.new_group()), sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) with self.assertRaisesRegex( ValueError, "inter-node process groups do not match" ): model(inp) @skip_if_lt_x_gpu(2) def test_invalid_pg_specification_raises(self): pol = ModuleWrapPolicy({nn.Linear}) model = MyModel().cuda() with self.assertRaisesRegex( ValueError, "Expected process_group to be passed in" ): model = FSDP( model, auto_wrap_policy=pol, process_group=self.process_group, sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) # TODO - add test for ZeRO-2 style sharding ensure params are not # resharded after forward. @skip_if_lt_x_gpu(2) def test_fsdp_hybrid_shard_basic_setup(self): """ Tests basic functionality of HYBRID_SHARD and _HYBRID_SHARD_ZERO2: 1. Inter and intra-node process groups are correctly setup 2. Process groups are the same across FSDP wrapped instances 3. reduce_scatter and allreduce called the expected no. 
of times """ self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "sharding_strategy_mode": [ ShardingStrategyMode.ALL_HYBRID_SHARD, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD, ], }, self._test_fsdp_hybrid_shard_basic_setup, ) def _test_fsdp_hybrid_shard_basic_setup( self, hsdp_sharding_strategy: ShardingStrategy, sharding_strategy_mode: ShardingStrategyMode, ): auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) fsdp_kwargs = { "auto_wrap_policy": auto_wrap_policy, "device_id": torch.cuda.current_device(), "sharding_strategy": hsdp_sharding_strategy, } fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE, fsdp_kwargs, ) fsdp_model = self._init_hsdp_model( hsdp_sharding_strategy, sharding_strategy_mode ) # All FSDP modules should have state.process_group as the process group over which to # shard (default process group), and state._inter_node_pg (process group containing only # this rank) intra_node_pgs = set() inter_node_pgs = set() for fsdp_module in fsdp_model.fsdp_modules(fsdp_model): # TODO: This needs to be replaced if we deprecate # `FSDP.sharding_strategy` to only use the handle one. # https://github.com/pytorch/pytorch/issues/90857 if fsdp_module.sharding_strategy not in HYBRID_SHARDING_STRATEGIES: self.assertEqual( sharding_strategy_mode, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD ) self.assertEqual( fsdp_module.sharding_strategy, ShardingStrategy.FULL_SHARD ) continue # process_group should be across the node, which is just the # whole world here. self.assertEqual( dist.get_world_size(fsdp_module.process_group), dist.get_world_size(self.process_group), ) intra_node_pgs.add(fsdp_module.process_group) inter_node_pg = fsdp_module._inter_node_pg inter_node_pgs.add(inter_node_pg) self.assertEqual(1, dist.get_world_size(inter_node_pg)) self.assertFalse(_rank_not_in_group(inter_node_pg)) self.assertEqual(hsdp_sharding_strategy, fsdp_module.sharding_strategy) # All fsdp modules should share the same process groups self.assertEqual(1, len(intra_node_pgs)) self.assertEqual(1, len(inter_node_pgs)) orig_ar = dist.all_reduce orig_rs = dist.reduce_scatter_tensor def patched_collective(orig_collective, counter, *args, **kwargs): counter[orig_collective] += 1 return orig_collective(*args, **kwargs) cntr = Counter() patched_allreduce = partial(patched_collective, orig_ar, cntr) patched_reduce_scatter = partial(patched_collective, orig_rs, cntr) with patch_allreduce(patched_allreduce), patch_reduce_scatter( patched_reduce_scatter ): inp = fsdp_model.get_input(device=torch.cuda.current_device()) out = fsdp_model(inp[0], inp[1]) loss = fsdp_model.get_loss(inp, out) loss.backward() if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: num_flat_params = len(list(traversal_utils._get_fsdp_handles(fsdp_model))) self.assertEqual(num_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: num_hsdp_flat_params = len( list(traversal_utils._get_fsdp_handles(fsdp_model.transformer)) ) num_flat_params = len(list(traversal_utils._get_fsdp_handles(fsdp_model))) self.assertEqual(num_hsdp_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) def _init_hsdp_model( self, hsdp_sharding_strategy: ShardingStrategy, sharding_strategy_mode: str, ): if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: 
auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) fsdp_kwargs = { "auto_wrap_policy": auto_wrap_policy, "device_id": torch.cuda.current_device(), "sharding_strategy": hsdp_sharding_strategy, } fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE, fsdp_kwargs, ) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.NO_FSDP, CUDAInitMode.CUDA_BEFORE, {}, ) transformer_auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) # Use the HSDP strategy for the transformer module model.transformer = FSDP( model.transformer, auto_wrap_policy=transformer_auto_wrap_policy, device_id=torch.cuda.current_device(), sharding_strategy=hsdp_sharding_strategy, ) # Use `FULL_SHARD` for the embedding and output projection fsdp_model = FSDP( model, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, ) return fsdp_model instantiate_parametrized_tests(TestFSDPHybridShard) if __name__ == "__main__": run_tests()
def patch_reduce_scatter(new_reduce_scatter): """ Patches dist.reduce_scatter_tensor with a new reduce_scatter_tensor and restores upon exiting. """ orig_reduce_scatter = dist.reduce_scatter_tensor dist.reduce_scatter_tensor = new_reduce_scatter try: yield finally: dist.reduce_scatter_tensor = orig_reduce_scatter class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.lin1 = nn.Linear(10, 10) self.lin2 = nn.Linear(10, 10) self.lin3 = nn.Linear(10, 10) def forward(self, x): return self.lin3(self.lin2(self.lin1(x))) class ShardingStrategyMode(Enum): ALL_HYBRID_SHARD = auto() MIXED_HYBRID_FULL_SHARD = auto() class TestFSDPHybridShard(FSDPTest): @property def world_size(self): return max(torch.cuda.device_count(), 2) @property def process_group(self): return dist.distributed_c10d._get_default_group() @skip_if_lt_x_gpu(2) def test_raises_manual_wrap_hybrid_shard_when_none_policy(self): model = MyModel().cuda() err_ctx = self.assertRaisesRegex( ValueError, "requires explicit specification of process group or device_mesh.", ) with err_ctx: model = FSDP(model, sharding_strategy=ShardingStrategy.HYBRID_SHARD) with err_ctx: model = FSDP(model, sharding_strategy=ShardingStrategy._HYBRID_SHARD_ZERO2) @skip_if_lt_x_gpu(4) def test_hsdp_save_load_state_dict(self): model = MyModel().cuda() num_node_devices = torch.cuda.device_count() shard_rank_lists = list(range(0, num_node_devices // 2)), list( range(num_node_devices // 2, num_node_devices) ) shard_groups = ( dist.new_group(shard_rank_lists[0]), dist.new_group(shard_rank_lists[1]), ) my_shard_group = ( shard_groups[0] if self.rank in shard_rank_lists[0] else shard_groups[1] ) my_replicate_group = None my_rank = self.rank # Create groups like (0, 4), (1, 5), (2, 6) etc and assign appropriately shard_factor = len(shard_rank_lists[0]) for i in range(num_node_devices // 2): replicate_group_ranks = list(range(i, num_node_devices, shard_factor)) replicate_group = dist.new_group(replicate_group_ranks) if my_rank in replicate_group_ranks: my_replicate_group = replicate_group fsdp_ctor = partial( FSDP, sharding_strategy=ShardingStrategy.HYBRID_SHARD, use_orig_params=True, process_group=(my_shard_group, my_replicate_group), ) model = fsdp_ctor(model) optim = torch.optim.AdamW(model.parameters()) # Initialize optimizer states model(torch.randn(2, 10)).sum().backward() optim.step() shard_g = model.process_group replicate_g = model._inter_node_pg assert shard_g == my_shard_group assert replicate_g == my_replicate_group with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT): msd = model.state_dict() osd = FSDP.optim_state_dict(model, optim) load_model = fsdp_ctor(MyModel().cuda()) load_optim = torch.optim.AdamW(load_model.parameters()) with FSDP.state_dict_type(load_model, StateDictType.SHARDED_STATE_DICT): load_model.load_state_dict(msd) FSDP.optim_state_dict_to_load(load_model, load_optim, osd) load_optim.load_state_dict(osd) @skip_if_lt_x_gpu(4) def test_hsdp_sync_module_state(self): model = MyModel().cuda() num_node_devices = torch.cuda.device_count() shard_rank_lists = list(range(0, num_node_devices // 2)), list( range(num_node_devices // 2, num_node_devices) ) shard_groups = ( dist.new_group(shard_rank_lists[0]), dist.new_group(shard_rank_lists[1]), ) my_shard_group = ( shard_groups[0] if self.rank in shard_rank_lists[0] else shard_groups[1] ) my_replicate_group = None my_rank = self.rank # Create groups like (0, 4), (1, 5), (2, 6) etc and assign appropriately shard_factor = len(shard_rank_lists[0]) for i in 
range(num_node_devices // 2): replicate_group_ranks = list(range(i, num_node_devices, shard_factor)) replicate_group = dist.new_group(replicate_group_ranks) if my_rank in replicate_group_ranks: my_replicate_group = replicate_group nn.init.constant_(model.lin1.weight, self.rank) nn.init.constant_(model.lin2.weight, self.rank) nn.init.constant_(model.lin3.weight, self.rank) fsdp_ctor = partial( FSDP, sharding_strategy=ShardingStrategy.HYBRID_SHARD, use_orig_params=True, sync_module_states=True, process_group=(my_shard_group, my_replicate_group), ) model = fsdp_ctor(model) with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT): self.assertTrue((model.lin1.weight == 0).all()) self.assertTrue((model.lin2.weight == 0).all()) self.assertTrue((model.lin3.weight == 0).all()) @skip_if_lt_x_gpu(2) def test_invalid_pg_specification_raises(self): pol = ModuleWrapPolicy({nn.Linear}) model = MyModel().cuda() with self.assertRaisesRegex( ValueError, "Expected process_group to be passed in" ): model = FSDP( model, auto_wrap_policy=pol, process_group=self.process_group, sharding_strategy=ShardingStrategy.HYBRID_SHARD, ) # TODO - add test for ZeRO-2 style sharding ensure params are not # resharded after forward. @skip_if_lt_x_gpu(2) def test_fsdp_hybrid_shard_basic_setup(self): """ Tests basic functionality of HYBRID_SHARD and _HYBRID_SHARD_ZERO2: 1. Inter and intra-node process groups are correctly setup 2. Process groups are the same across FSDP wrapped instances 3. reduce_scatter and allreduce called the expected no. of times """ self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "sharding_strategy_mode": [ ShardingStrategyMode.ALL_HYBRID_SHARD, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD, ], "use_orig_params": [False, True], "use_device_mesh": [False, True], }, self._test_fsdp_hybrid_shard_basic_setup, ) def _test_fsdp_hybrid_shard_basic_setup( self, hsdp_sharding_strategy: ShardingStrategy, sharding_strategy_mode: ShardingStrategyMode, use_orig_params: bool, use_device_mesh: bool, ): if use_device_mesh: device_mesh = init_device_mesh("cuda", (1, self.world_size)) else: device_mesh = None hsdp_model = self._init_hsdp_model( hsdp_sharding_strategy, sharding_strategy_mode, use_orig_params, hsdp_device_mesh=device_mesh, ) # All FSDP modules should have state.process_group as the process group over which to # shard (default process group), and state._inter_node_pg (process group containing only # this rank) intra_node_pgs = set() inter_node_pgs = set() for fsdp_module in hsdp_model.fsdp_modules(hsdp_model): # TODO: This needs to be replaced if we deprecate # `FSDP.sharding_strategy` to only use the handle one. # https://github.com/pytorch/pytorch/issues/90857 if fsdp_module.sharding_strategy not in HYBRID_SHARDING_STRATEGIES: self.assertEqual( sharding_strategy_mode, ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD ) self.assertEqual( fsdp_module.sharding_strategy, ShardingStrategy.FULL_SHARD ) continue # process_group should be across the node, which is just the # whole world here. 
self.assertEqual( dist.get_world_size(fsdp_module.process_group), dist.get_world_size(self.process_group), ) intra_node_pgs.add(fsdp_module.process_group) inter_node_pg = fsdp_module._inter_node_pg inter_node_pgs.add(inter_node_pg) self.assertEqual(1, dist.get_world_size(inter_node_pg)) self.assertFalse(_rank_not_in_group(inter_node_pg)) self.assertEqual(hsdp_sharding_strategy, fsdp_module.sharding_strategy) # All fsdp modules should share the same process groups self.assertEqual(1, len(intra_node_pgs)) self.assertEqual(1, len(inter_node_pgs)) orig_ar = dist.all_reduce orig_rs = dist.reduce_scatter_tensor def patched_collective(orig_collective, counter, *args, **kwargs): counter[orig_collective] += 1 return orig_collective(*args, **kwargs) cntr = Counter() patched_allreduce = partial(patched_collective, orig_ar, cntr) patched_reduce_scatter = partial(patched_collective, orig_rs, cntr) with patch_allreduce(patched_allreduce), patch_reduce_scatter( patched_reduce_scatter ): inp = hsdp_model.get_input(device=torch.cuda.current_device()) out = hsdp_model(inp[0], inp[1]) loss = hsdp_model.get_loss(inp, out) loss.backward() if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model))) self.assertEqual(num_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: num_hsdp_flat_params = len( list(traversal_utils._get_fsdp_handles(hsdp_model.transformer)) ) num_flat_params = len(list(traversal_utils._get_fsdp_handles(hsdp_model))) self.assertEqual(num_hsdp_flat_params, cntr[orig_ar]) self.assertEqual(num_flat_params, cntr[orig_rs]) @skip_if_lt_x_gpu(4) def test_fsdp_hybrid_shard_parity(self): self.run_subtests( { "hsdp_sharding_strategy": [ ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2, ], "use_orig_params": [False, True], }, self._test_fsdp_hybrid_shard_parity, ) def _test_fsdp_hybrid_shard_parity( self, hsdp_sharding_strategy: ShardingStrategy, use_orig_params: bool ): fsdp_model = self._init_fsdp_model(use_orig_params) global_pg = dist.distributed_c10d._get_default_group() hsdp_pgs = _init_intra_and_inter_node_groups(global_pg, 2) hsdp_model = self._init_hsdp_model( hsdp_sharding_strategy, ShardingStrategyMode.ALL_HYBRID_SHARD, use_orig_params, hsdp_process_groups=hsdp_pgs, ) assert ( hsdp_model._inter_node_pg.size() > 1 ), "HSDP model initialized without replication" fsdp_optim = torch.optim.Adam(fsdp_model.parameters(), lr=1e-2) hsdp_optim = torch.optim.Adam(hsdp_model.parameters(), lr=1e-2) torch.manual_seed(global_pg.rank() + 1) for _ in range(5): inp = fsdp_model.module.get_input(torch.device("cuda")) losses: List[torch.Tensor] = [] for model, optim in ((fsdp_model, fsdp_optim), (hsdp_model, hsdp_optim)): optim.zero_grad() loss = model(*inp).sum() losses.append(loss) loss.backward() optim.step() self.assertEqual(losses[0], losses[1]) def _init_fsdp_model(self, use_orig_params: bool) -> nn.Module: auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) hsdp_kwargs = { "auto_wrap_policy": auto_wrap_policy, "device_id": torch.cuda.current_device(), "use_orig_params": use_orig_params, } fsdp_model = TransformerWithSharedParams.init( self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE, hsdp_kwargs, deterministic=True, ) return fsdp_model def _init_hsdp_model( self, hsdp_sharding_strategy: ShardingStrategy, sharding_strategy_mode: str, 
use_orig_params: bool, hsdp_process_groups: Optional[ Tuple[dist.ProcessGroup, dist.ProcessGroup] ] = None, hsdp_device_mesh: Optional = None, ): assert hsdp_process_groups is None or hsdp_device_mesh is None auto_wrap_policy = ModuleWrapPolicy( {TransformerEncoderLayer, TransformerDecoderLayer}, ) hsdp_kwargs = { "device_id": torch.cuda.current_device(), "auto_wrap_policy": auto_wrap_policy, "sharding_strategy": hsdp_sharding_strategy, "use_orig_params": use_orig_params, "device_mesh": hsdp_device_mesh, } if sharding_strategy_mode == ShardingStrategyMode.ALL_HYBRID_SHARD: hsdp_model = TransformerWithSharedParams.init( hsdp_process_groups or self.process_group, FSDPInitMode.RECURSIVE, CUDAInitMode.CUDA_BEFORE, hsdp_kwargs, deterministic=True, ) elif sharding_strategy_mode == ShardingStrategyMode.MIXED_HYBRID_FULL_SHARD: model = TransformerWithSharedParams.init( hsdp_process_groups or self.process_group, FSDPInitMode.NO_FSDP, CUDAInitMode.CUDA_BEFORE, {}, deterministic=True, ) # Use the HSDP strategy for the transformer module model.transformer = FSDP(model.transformer, **hsdp_kwargs) # Use `FULL_SHARD` for the embedding and output projection hsdp_model = FSDP( model, device_id=torch.cuda.current_device(), sharding_strategy=ShardingStrategy.FULL_SHARD, use_orig_params=use_orig_params, ) return hsdp_model instantiate_parametrized_tests(TestFSDPHybridShard) if __name__ == "__main__": run_tests()
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, )
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial from typing import List, Optional, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.device_mesh import init_device_mesh from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ShardingStrategy, StateDictType, ) from torch.distributed.fsdp._init_utils import ( _init_intra_and_inter_node_groups, HYBRID_SHARDING_STRATEGIES, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_hybrid_shard.py
__init__
def __init__(self):
    super().__init__()
    self.lin1 = nn.Linear(10, 10)
    self.lin2 = nn.Linear(10, 10)
    self.lin3 = nn.Linear(10, 10)
def __init__(self) -> None:
    super().__init__()
    self.lin1 = nn.Linear(10, 10)
    self.lin2 = nn.Linear(10, 10)
    self.lin3 = nn.Linear(10, 10)
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy from torch.distributed.fsdp._init_utils import HYBRID_SHARDING_STRATEGIES from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class MyModel(nn.Module):
import contextlib import sys from collections import Counter from enum import auto, Enum from functools import partial from typing import List, Optional, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.device_mesh import init_device_mesh from torch.distributed.distributed_c10d import _rank_not_in_group from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ShardingStrategy, StateDictType, ) from torch.distributed.fsdp._init_utils import ( _init_intra_and_inter_node_groups, HYBRID_SHARDING_STRATEGIES, ) from torch.distributed.fsdp.wrap import ModuleWrapPolicy from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_ignored_modules.py
test_diff_ignored_modules_across_ranks
def test_diff_ignored_modules_across_ranks(self):
    """
    Tests ignoring different modules across ranks.

    Args:
        pass_ignored_modules_to_root (bool): If ``False``, does not pass
            any ignored modules (including those already ignored in child
            FSDP instances) to the root FSDP instance; if ``True``, passes
            all ignored modules (representing a superset of the children's
            ignored modules) to the root FSDP instance.
    """
    self.run_subtests(
        {
            "pass_ignored_modules_to_root": [False, True],
            "ignore_modules": [True, False],
        },
        self._test_diff_ignored_modules_across_ranks,
    )
def test_diff_ignored_modules_across_ranks(self):
    """
    Tests ignoring different modules across ranks.

    Args:
        pass_ignored_modules_to_root (bool): If ``False``, does not pass
            any ignored modules (including those already ignored in child
            FSDP instances) to the root FSDP instance; if ``True``, passes
            all ignored modules (representing a superset of the children's
            ignored modules) to the root FSDP instance.
    """
    self.run_subtests(
        {
            "pass_ignored_modules_to_root": [False, True],
            "ignore_modules": [True, False],
            "composable": [True, False],
        },
        self._test_diff_ignored_modules_across_ranks,
    )
import sys import torch import torch.nn as nn from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPIgnoredModules(FSDPTest):
import functools import math import sys import torch import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch import distributed as dist from torch.distributed._composable import fully_shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_module_fsdp_state from torch.distributed.fsdp.wrap import ModuleWrapPolicy, transformer_auto_wrap_policy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPIgnoredModules(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_ignored_modules.py
test_ignored_states_check
def test_ignored_states_check(self):
    """
    Tests that passing invalid ``ignored_modules`` or ``ignored_states``
    raises an appropriate error.
    """
    self.run_subtests(
        {"ignore_modules": [True, False]},
        self._test_ignored_states_check,
    )
import functools import math import sys import torch import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch import distributed as dist from torch.distributed._composable import fully_shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_module_fsdp_state from torch.distributed.fsdp.wrap import ModuleWrapPolicy, transformer_auto_wrap_policy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPIgnoredModules(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_ignored_modules.py
_test_ignored_states_check
instantiate_parametrized_tests(TestFSDPIgnoredModules)

if __name__ == "__main__":
    run_tests()
def _test_ignored_states_check(self, ignore_modules: bool):
    model = Model().cuda()
    ignored_modules = list(model.layer1.children())[1:]
    ignored_params = {p for m in ignored_modules for p in m.parameters()}
    ignored_states = ignored_params.union(set(ignored_modules))
    if ignore_modules:
        # Check that passing `ignored_modules` not as uniformly `nn.Module`
        # raises an error
        with self.assertRaisesRegex(
            ValueError,
            "ignored_modules expects nn.Module list elements but got types "
            r"\[<class 'torch.nn.parameter.Parameter'>\]",
        ):
            FSDP(model, ignored_modules=ignored_params)
        # Check that passing both `ignored_modules` and `ignored_states`
        # raises an error (and fold this only into `ignore_modules=True`)
        with self.assertRaisesRegex(
            ValueError,
            "Cannot pass both ignored_modules and ignored_states at the same time",
        ):
            FSDP(
                model,
                ignored_modules=ignored_modules,
                ignored_states=ignored_params,
            )
    else:
        # Check that passing `ignored_states` not as uniformly
        # `nn.Parameter` or uniformly `nn.Module` raises an error
        with self.assertRaisesRegex(
            ValueError,
            "ignored_states expects all nn.Parameter or all nn.Module list "
            r"elements but got types \[<class 'torch.nn.modules.linear.Linear'>, "
            r"<class 'torch.nn.parameter.Parameter'>\]",
        ):
            FSDP(model, ignored_states=ignored_states)
import functools import math import sys import torch import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch import distributed as dist from torch.distributed._composable import fully_shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._common_utils import _get_module_fsdp_state from torch.distributed.fsdp.wrap import ModuleWrapPolicy, transformer_auto_wrap_policy from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( CUDAInitMode, FSDPInitMode, FSDPTest, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class TestFSDPIgnoredModules(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/fsdp/test_fsdp_input.py
test_input_type
def test_input_type(self, input_cls):
    """Test FSDP with input being a list or a dict, only single GPU."""

    class Model(Module):
        def __init__(self):
            super().__init__()
            self.layer = Linear(4, 4)

        def forward(self, input):
            if isinstance(input, list):
                input = input[0]
            else:
                assert isinstance(input, dict), input
                input = input["in"]
            return self.layer(input)

    model = FSDP(Model()).cuda()
    optim = SGD(model.parameters(), lr=0.1)

    for _ in range(5):
        in_data = torch.rand(64, 4).cuda()
        in_data.requires_grad = True
        if input_cls is list:
            in_data = [in_data]
        else:
            self.assertTrue(input_cls is dict)
            in_data = {"in": in_data}

        out = model(in_data)
        out.sum().backward()
        optim.step()
        optim.zero_grad()
def test_input_type(self, input_cls):
    """Test FSDP with input being a list or a dict, only single GPU."""

    class Model(Module):
        def __init__(self) -> None:
            super().__init__()
            self.layer = Linear(4, 4)

        def forward(self, input):
            if isinstance(input, list):
                input = input[0]
            else:
                assert isinstance(input, dict), input
                input = input["in"]
            return self.layer(input)

    model = FSDP(Model()).cuda()
    optim = SGD(model.parameters(), lr=0.1)

    for _ in range(5):
        in_data = torch.rand(64, 4).cuda()
        in_data.requires_grad = True
        if input_cls is list:
            in_data = [in_data]
        else:
            self.assertTrue(input_cls is dict)
            in_data = {"in": in_data}

        out = model(in_data)
        out.sum().backward()
        optim.step()
        optim.zero_grad()
import sys import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn import Linear, Module from torch.optim import SGD from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_DEV_DBG_ASAN, ) class TestInput(FSDPTest):
import sys import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn import Linear, Module from torch.optim import SGD from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_DEV_DBG_ASAN, ) class TestInput(FSDPTest):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_input.py
__init__
def __init__(self):
    super().__init__()
    self.layer = Linear(4, 4)
def __init__(self) -> None:
    super().__init__()
    self.layer = Linear(4, 4)
import sys import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn import Linear, Module from torch.optim import SGD from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_DEV_DBG_ASAN, ) class Model(Module):
import sys import torch from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.nn import Linear, Module from torch.optim import SGD from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, subtest, TEST_WITH_DEV_DBG_ASAN, ) class Model(Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_memory.py
get_cur_mem
def get_cur_mem(rank, result, prefix):
    """Collect memory allocated values in a result dict in MB"""
    torch._C._cuda_clearCublasWorkspaces()
    result[prefix] = round(torch.cuda.memory_allocated() / 1024 / 1024)


class Model(nn.Module):
    def __init__(self, hidden_dim, with_fsdp=False, with_checkpoint=False):
        super().__init__()
        if with_fsdp:
            self.stem = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3),
                FSDP(nn.BatchNorm2d(64)),
                nn.ReLU(inplace=True),
            )
        else:
            self.stem = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
            )
        if with_fsdp:
            self.blocks = nn.Sequential(
                nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
                nn.Flatten(),
            )
        else:
            self.blocks = nn.Sequential(
                nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
                nn.Flatten(),
            )
        self.head = nn.Linear(hidden_dim, 10)
        self.with_checkpoint = with_checkpoint

    def forward(self, x):
        if self.with_checkpoint:
            return self.head(checkpoint(self.blocks, self.stem(x)))
        else:
            return self.head(self.blocks(self.stem(x)))
def get_cur_mem(rank, result, prefix):
    """Collect memory allocated values in a result dict in MB"""
    torch._C._cuda_clearCublasWorkspaces()
    result[prefix] = round(torch.cuda.memory_allocated() / 1024 / 1024)


class Model(nn.Module):
    def __init__(self, hidden_dim, with_fsdp=False, with_checkpoint=False):
        super().__init__()
        if with_fsdp:
            self.stem = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3),
                FSDP(nn.BatchNorm2d(64)),
                nn.ReLU(inplace=True),
            )
        else:
            self.stem = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
            )
        if with_fsdp:
            self.blocks = nn.Sequential(
                nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                FSDP(nn.BatchNorm2d(hidden_dim)),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
                nn.Flatten(),
            )
        else:
            self.blocks = nn.Sequential(
                nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(output_size=(1, 1)),
                nn.Flatten(),
            )
        self.head = nn.Linear(hidden_dim, 10)
        self.with_checkpoint = with_checkpoint

    def forward(self, x):
        if self.with_checkpoint:
            return self.head(checkpoint(self.blocks, self.stem(x), use_reentrant=True))
        else:
            return self.head(self.blocks(self.stem(x)))
import sys import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) from torch.utils.checkpoint import checkpoint
import sys import torch import torch.nn as nn import torch.optim as optim from torch import distributed as dist from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import FSDPTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) from torch.utils.checkpoint import checkpoint
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/fsdp/test_fsdp_misc.py
__init__
def __init__(self):
    super().__init__()
    self.lin = nn.Linear(100, 100)
def __init__(self) -> None:
    super().__init__()
    self.a = nn.Linear(2, 2)
    self.b = nn.Linear(2, 2)
import functools import sys import warnings from collections import namedtuple from contextlib import suppress from copy import deepcopy from typing import Any, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.fsdp import ( CPUOffload, FlatParameter, FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp._runtime_utils import HOMOGENEOUS_ATTR_NAMES from torch.distributed.fsdp.wrap import ( always_wrap_policy, ModuleWrapPolicy, transformer_auto_wrap_policy, ) from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( _assert_module_states, CUDAInitMode, FSDPInitMode, FSDPTest, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class MyModule(nn.Module):
import functools import os import sys import warnings from collections import namedtuple from contextlib import nullcontext from copy import deepcopy from itertools import chain from typing import Any, Tuple import torch import torch.distributed as dist import torch.distributed.fsdp._traversal_utils as traversal_utils import torch.nn as nn from torch.distributed.fsdp import ( CPUOffload, FlatParameter, FullyShardedDataParallel as FSDP, ShardingStrategy, ) from torch.distributed.fsdp._flat_param import _FSDP_USE_UNSAFE_SETATTR from torch.distributed.fsdp._runtime_utils import HOMOGENEOUS_ATTR_NAMES from torch.distributed.fsdp.wrap import ( always_wrap_policy, ModuleWrapPolicy, transformer_auto_wrap_policy, ) from torch.distributed.optim import _apply_optimizer_in_backward from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import skip_if_lt_x_gpu from torch.testing._internal.common_fsdp import ( _assert_module_states, CUDAInitMode, FSDPInitMode, FSDPTest, FSDPTestMultiThread, MLP, NestedWrappedModule, TransformerWithSharedParams, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TEST_WITH_DEV_DBG_ASAN, ) class MyModel(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified