diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e3572cfc4c6a0ddc3d8fa2e1b056415204acdfa --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py @@ -0,0 +1 @@ +# mypy: ignore-errors diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50632e64037b94c2901b75588e539e1b6337d231 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4fb13abb5b96d38e41dd7d7d097a4afd34e7077f --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py @@ -0,0 +1,5314 @@ +# mypy: ignore-errors + +r"""Importing this file must **not** initialize CUDA context. test_distributed +relies on this assumption to properly run. This means that when this is imported +no CUDA calls shall be made, including torch.cuda.device_count(), etc. + +torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported. +""" + +import argparse +import contextlib +import copy +import ctypes +import errno +import functools +import gc +import hashlib +import inspect +import io +import json +import logging +import math +import operator +import os +import platform +import random +import re +import shutil +import signal +import socket +import subprocess +import sys +import tempfile +import threading +import time +import types +import unittest +import warnings +from collections.abc import Mapping, Sequence +from contextlib import closing, contextmanager +from copy import deepcopy +from dataclasses import dataclass +from enum import Enum +from functools import partial, wraps +from itertools import product, chain +from pathlib import Path +from statistics import mean +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) +from unittest.mock import MagicMock + +import expecttest +import numpy as np + +import __main__ # type: ignore[import] +import torch +import torch.backends.cudnn +import torch.backends.mkl +import torch.backends.mps +import torch.backends.xnnpack +import torch.cuda +from torch import Tensor +from torch._C import ScriptDict, ScriptList # type: ignore[attr-defined] +from torch._dynamo.trace_rules import _as_posix_path +from torch._utils_internal import get_writable_path +from torch.nn import ( + ModuleDict, + ModuleList, + ParameterDict, + ParameterList, + Sequential, +) +from torch.onnx import ( + register_custom_op_symbolic, + unregister_custom_op_symbolic, +) +from torch.testing import make_tensor +from torch.testing._comparison import ( + BooleanPair, + NonePair, + NumberPair, + Pair, + TensorLikePair, +) +from torch.testing._comparison import not_close_error_metas +from torch.testing._internal.common_dtype import get_all_dtypes +from 
torch.utils._import_utils import _check_module_exists +import torch.utils._pytree as pytree +try: + import pytest + has_pytest = True +except ImportError: + has_pytest = False + + +MI300_ARCH = ("gfx940", "gfx941", "gfx942") + + +def freeze_rng_state(*args, **kwargs): + return torch.testing._utils.freeze_rng_state(*args, **kwargs) + + +# Class to keep track of test flags configurable by environment variables. +# Flags set here are intended to be read-only and should not be modified after +# definition. +# TODO: Expand this class to handle abritrary settings in addition to boolean flags? +class TestEnvironment: + # Set of env vars to set for the repro command that is output on test failure. + # Specifically, this includes env vars that are set to non-default values and + # are not implied. Maps from env var name -> value (int) + repro_env_vars: dict = {} + + # Defines a flag usable throughout the test suite, determining its value by querying + # the specified environment variable. + # + # Args: + # name (str): The name of the flag. A global variable with this name will be set + # for convenient access throughout the test suite. + # env_var (str): The name of the primary environment variable from which to + # determine the value of this flag. If this is None or the environment variable + # is unset, the default value will be used unless otherwise implied (see + # implied_by_fn). Default: None + # default (bool): The default value to use for the flag if unset by the environment + # variable and unimplied. Default: False + # include_in_repro (bool): Indicates whether this flag should be included in the + # repro command that is output on test failure (i.e. whether it is possibly + # relevant to reproducing the test failure). Default: True + # enabled_fn (Callable): Callable returning whether the flag should be enabled + # given the environment variable value and the default value. Default: Lambda + # requiring "0" to disable if on by default OR "1" to enable if off by default. + # implied_by_fn (Callable): Thunk returning a bool to imply this flag as enabled + # by something outside of its primary environment variable setting. For example, + # this can be useful if the value of another environment variable implies the flag + # as enabled. Default: Lambda returning False to indicate no implications. + @staticmethod + def def_flag( + name, + env_var=None, + default=False, + include_in_repro=True, + enabled_fn=lambda env_var_val, default: ( + (env_var_val != "0") if default else (env_var_val == "1")), + implied_by_fn=lambda: False, + ): + enabled = default + if env_var is not None: + env_var_val = os.getenv(env_var) + enabled = enabled_fn(env_var_val, default) + implied = implied_by_fn() + enabled = enabled or implied + if include_in_repro and (env_var is not None) and (enabled != default) and not implied: + TestEnvironment.repro_env_vars[env_var] = env_var_val + + # export flag globally for convenience + assert name not in globals(), f"duplicate definition of flag '{name}'" + globals()[name] = enabled + return enabled + + # Defines a setting usable throughout the test suite, determining its value by querying + # the specified environment variable. This differs from a flag in that it's not restricted + # to a boolean value. + # + # Args: + # name (str): The name of the setting. A global variable with this name will be set + # for convenient access throughout the test suite. + # env_var (str): The name of the primary environment variable from which to + # determine the value of this setting. 
If this is None or the environment variable + # is unset, the default value will be used. Default: None + # default (Any): The default value to use for the setting if unset by the environment + # variable. Default: None + # include_in_repro (bool): Indicates whether this setting should be included in the + # repro command that is output on test failure (i.e. whether it is possibly + # relevant to reproducing the test failure). Default: True + # parse_fn (Callable): Callable parsing the env var string. Default value just uses + # the string itself. + @staticmethod + def def_setting( + name, + env_var=None, + default=None, + include_in_repro=True, + parse_fn=lambda maybe_val_str: maybe_val_str, + ): + value = default if env_var is None else os.getenv(env_var) + value = parse_fn(value) + if include_in_repro and (value != default): + TestEnvironment.repro_env_vars[env_var] = value + + # export setting globally for convenience + assert name not in globals(), f"duplicate definition of setting '{name}'" + globals()[name] = value + return value + + # Returns a string prefix usable to set environment variables for any test + # settings that should be explicitly set to match this instantiation of the + # test suite. + # Example: "PYTORCH_TEST_WITH_ASAN=1 PYTORCH_TEST_WITH_ROCM=1" + @staticmethod + def repro_env_var_prefix() -> str: + return " ".join([f"{env_var}={value}" + for env_var, value in TestEnvironment.repro_env_vars.items()]) + + +log = logging.getLogger(__name__) +torch.backends.disable_global_flags() + +FILE_SCHEMA = "file://" +if sys.platform == 'win32': + FILE_SCHEMA = "file:///" + +# NB: This flag differs semantically from others in that setting the env var to any +# non-empty value will cause it to be true: +# CI=1, CI="true", CI=0, etc. all set the flag to be true. +# CI= and an unset CI set the flag to be false. +# GitHub sets the value to CI="true" to enable it. +IS_CI: bool = TestEnvironment.def_flag( + "IS_CI", + env_var="CI", + include_in_repro=False, + enabled_fn=lambda env_var_value, _: bool(env_var_value), +) +IS_SANDCASTLE: bool = TestEnvironment.def_flag( + "IS_SANDCASTLE", + env_var="SANDCASTLE", + implied_by_fn=lambda: os.getenv("TW_JOB_USER") == "sandcastle", + include_in_repro=False, +) + +_is_fbcode_default = ( + hasattr(torch._utils_internal, "IS_FBSOURCE") and + torch._utils_internal.IS_FBSOURCE +) + +IS_FBCODE: bool = TestEnvironment.def_flag( + "IS_FBCODE", + env_var="PYTORCH_TEST_FBCODE", + default=_is_fbcode_default, + include_in_repro=False, +) +IS_REMOTE_GPU: bool = TestEnvironment.def_flag( + "IS_REMOTE_GPU", + env_var="PYTORCH_TEST_REMOTE_GPU", + include_in_repro=False, +) + +DISABLE_RUNNING_SCRIPT_CHK: bool = TestEnvironment.def_flag( + "DISABLE_RUNNING_SCRIPT_CHK", + env_var="PYTORCH_DISABLE_RUNNING_SCRIPT_CHK", + include_in_repro=False, +) +# NB: enabled by default unless in an fbcode context. 
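+# When this flag is enabled, a failing test's output includes a ready-to-run repro
+# command, prefixed with any env vars recorded in TestEnvironment.repro_env_vars above.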
+PRINT_REPRO_ON_FAILURE: bool = TestEnvironment.def_flag( + "PRINT_REPRO_ON_FAILURE", + env_var="PYTORCH_PRINT_REPRO_ON_FAILURE", + default=(not IS_FBCODE), + include_in_repro=False, +) + +# possibly restrict OpInfo tests to a single sample input +OPINFO_SAMPLE_INPUT_INDEX: Optional[int] = TestEnvironment.def_setting( + "OPINFO_SAMPLE_INPUT_INDEX", + env_var="PYTORCH_OPINFO_SAMPLE_INPUT_INDEX", + default=None, + # Don't include the env var value in the repro command because the info will + # be queried from the tracked sample input instead + include_in_repro=False, + parse_fn=lambda val: None if val is None else int(val), +) + +DEFAULT_DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' +DEFAULT_SLOW_TESTS_FILE = 'slow_tests.json' + +disabled_tests_dict = {} +slow_tests_dict = {} + +def maybe_load_json(filename): + if os.path.isfile(filename): + with open(filename) as fp: + return json.load(fp) + log.warning("Attempted to load json file '%s' but it does not exist.", filename) + return {} + +# set them here in case the tests are running in a subprocess that doesn't call run_tests +if os.getenv("SLOW_TESTS_FILE", ""): + slow_tests_dict = maybe_load_json(os.getenv("SLOW_TESTS_FILE", "")) +if os.getenv("DISABLED_TESTS_FILE", ""): + disabled_tests_dict = maybe_load_json(os.getenv("DISABLED_TESTS_FILE", "")) + +NATIVE_DEVICES = ('cpu', 'cuda', 'xpu', 'meta', torch._C._get_privateuse1_backend_name()) + +check_names = ['orin', 'concord', 'galen', 'xavier', 'nano', 'jetson', 'tegra'] +IS_JETSON = any(name in platform.platform() for name in check_names) + +def gcIfJetson(fn): + # Irregular Jetson host/device memory setup requires cleanup to avoid tests being killed + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if IS_JETSON: + gc.collect() + torch.cuda.empty_cache() + fn(*args, **kwargs) + return wrapper + +# Tries to extract the current test function by crawling the stack. +# If unsuccessful, return None. +def extract_test_fn() -> Optional[Callable]: + try: + stack = inspect.stack() + for frame_info in stack: + frame = frame_info.frame + if "self" not in frame.f_locals: + continue + self_val = frame.f_locals["self"] + if isinstance(self_val, unittest.TestCase): + test_id = self_val.id() + test_name = test_id.split('.')[2] + test_fn = getattr(self_val, test_name).__func__ + return test_fn + except Exception: + pass + return None + +# Contains tracked input data useful for debugging purposes +@dataclass +class TrackedInput: + index: int + val: Any + type_desc: str + +# Attempt to pull out tracked input information from the test function. +# A TrackedInputIter is used to insert this information. +def get_tracked_input() -> Optional[TrackedInput]: + test_fn = extract_test_fn() + if test_fn is None: + return None + if not hasattr(test_fn, "tracked_input"): + return None + return test_fn.tracked_input + +def clear_tracked_input(): + test_fn = extract_test_fn() + if test_fn is None: + return + if not hasattr(test_fn, "tracked_input"): + return None + test_fn.tracked_input = None + +# Wraps an iterator and tracks the most recent value the iterator produces +# for debugging purposes. Tracked values are stored on the test function. +class TrackedInputIter: + def __init__(self, child_iter, input_type_desc, + callback=lambda x: x, set_seed=True, restrict_to_index=None): + self.child_iter = enumerate(child_iter) + # Input type describes the things we're tracking (e.g. "sample input", "error input"). 
+ self.input_type_desc = input_type_desc + # Callback is run on each iterated thing to get the thing to track. + self.callback = callback + self.test_fn = extract_test_fn() + # Indicates whether the random seed should be set before each call to the iterator + self.set_seed = set_seed + # Indicates that iteration should be restricted to only the provided index. + # If None, no restriction is done + self.restrict_to_index = restrict_to_index + + def __iter__(self): + return self + + def __next__(self): + while True: + if self.set_seed: + # use a test-name-specific hash for the seed if possible + seed = ( + int.from_bytes(hashlib.sha256( + self.test_fn.__qualname__.encode("utf-8")).digest()[:4], 'little') + if self.test_fn is not None else SEED + ) + set_rng_seed(seed) + + # allow StopIteration to bubble up + input_idx, input_val = next(self.child_iter) + if (self.restrict_to_index is None) or (input_idx == self.restrict_to_index): + break + + self._set_tracked_input( + TrackedInput( + index=input_idx, val=self.callback(input_val), type_desc=self.input_type_desc + ) + ) + return input_val + + def _set_tracked_input(self, tracked_input: TrackedInput): + if self.test_fn is None: + return + if not hasattr(self.test_fn, "tracked_input"): + return + self.test_fn.tracked_input = tracked_input + +class _TestParametrizer: + """ + Decorator class for parametrizing a test function, yielding a set of new tests spawned + from the original generic test, each specialized for a specific set of test inputs. For + example, parametrizing a test across the set of ops will result in a test function per op. + + The decision of how to parametrize / what to parametrize over is intended to be implemented + by each derived class. + + In the details, the decorator adds a 'parametrize_fn' property to the test function. This function + is intended to be called later by one of: + * Device-specific test instantiation via instantiate_device_type_tests(). Note that for this + case there is no need to explicitly parametrize over device type, as that is handled separately. + * Device-agnostic parametrized test instantiation via instantiate_parametrized_tests(). + + If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new + composite 'parametrize_fn' will be created that generates tests with the product of the parameters + generated by the old and new parametrize_fns. This allows for convenient composability of decorators. + """ + def _parametrize_test(self, test, generic_cls, device_cls): + """ + Parametrizes the given test function across whatever dimension is specified by the derived class. + Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all + ops, all modules, or all ops + their associated dtypes. + + Args: + test (fn): Test function to parametrize over + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None + if the tests are not part of a device-specific set + + Returns: + Generator object returning 4-tuples of: + test (fn): Parametrized test function; must support a device arg and args for any params + test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to + the base name of the test + param_kwargs (dict): Param kwargs to pass to the test (e.g. 
{'op': 'add', 'dtype': torch.int64}) + decorator_fn (callable): Callable[[Dict], List] for list of decorators to apply given param_kwargs + """ + raise NotImplementedError + + def __call__(self, fn): + if hasattr(fn, 'parametrize_fn'): + # Do composition with the product of args. + old_parametrize_fn = fn.parametrize_fn + new_parametrize_fn = self._parametrize_test + fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn) + else: + fn.parametrize_fn = self._parametrize_test + return fn + + +def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn): + """ + Returns a parametrize_fn that parametrizes over the product of the parameters handled + by the given parametrize_fns. Each given parametrize_fn should each have the signature + f(test, generic_cls, device_cls). + + The test names will be a combination of the names produced by the parametrize_fns in + "_" order. This order is done to match intuition for constructed names + when composing multiple decorators; the names will be built in top to bottom order when stacking + parametrization decorators. + + Args: + old_parametrize_fn (callable) - First parametrize_fn to compose. + new_parametrize_fn (callable) - Second parametrize_fn to compose. + """ + + def composite_fn(test, generic_cls, device_cls, + old_parametrize_fn=old_parametrize_fn, + new_parametrize_fn=new_parametrize_fn): + old_tests = list(old_parametrize_fn(test, generic_cls, device_cls)) + for (old_test, old_test_name, old_param_kwargs, old_dec_fn) in old_tests: + for (new_test, new_test_name, new_param_kwargs, new_dec_fn) in \ + new_parametrize_fn(old_test, generic_cls, device_cls): + redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys()) + if redundant_params: + raise RuntimeError('Parametrization over the same parameter by multiple parametrization ' + f'decorators is not supported. For test "{test.__name__}", the following parameters ' + f'are handled multiple times: {redundant_params}') + full_param_kwargs = {**old_param_kwargs, **new_param_kwargs} + merged_test_name = '{}{}{}'.format(new_test_name, + '_' if old_test_name != '' and new_test_name != '' else '', + old_test_name) + + def merged_decorator_fn(param_kwargs, old_dec_fn=old_dec_fn, new_dec_fn=new_dec_fn): + return list(old_dec_fn(param_kwargs)) + list(new_dec_fn(param_kwargs)) + + yield (new_test, merged_test_name, full_param_kwargs, merged_decorator_fn) + + return composite_fn + + +def instantiate_parametrized_tests(generic_cls): + """ + Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a + decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by + parametrized tests with specialized names. This should be used instead of + instantiate_device_type_tests() if the test class contains device-agnostic tests. + + You can also use it as a class decorator. E.g. + + ``` + @instantiate_parametrized_tests + class TestFoo(TestCase): + ... + ``` + + Args: + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + """ + for attr_name in tuple(dir(generic_cls)): + class_attr = getattr(generic_cls, attr_name) + if not hasattr(class_attr, 'parametrize_fn'): + continue + + # Remove the generic test from the test class. + delattr(generic_cls, attr_name) + + # Add parametrized tests to the test class. 
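+        # Note: the helper (together with the `param_kwargs=param_kwargs` default argument)
+        # binds each generated test to its own name and parameters at definition time, so
+        # the closures do not all end up capturing the loop's final values.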
+ def instantiate_test_helper(cls, name, test, param_kwargs): + @wraps(test) + def instantiated_test(self, param_kwargs=param_kwargs): + test(self, **param_kwargs) + + assert not hasattr(generic_cls, name), f"Redefinition of test {name}" + setattr(generic_cls, name, instantiated_test) + + for (test, test_suffix, param_kwargs, decorator_fn) in class_attr.parametrize_fn( + class_attr, generic_cls=generic_cls, device_cls=None): + full_name = f'{test.__name__}_{test_suffix}' + + # Apply decorators based on full param kwargs. + for decorator in decorator_fn(param_kwargs): + test = decorator(test) + + instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs) + return generic_cls + + +class subtest: + """ + Explicit subtest case for use with test parametrization. + Allows for explicit naming of individual subtest cases as well as applying + decorators to the parametrized test. + + Args: + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name (str): Optional name to use for the test. + decorators (iterable): Iterable of decorators to apply to the generated test. + """ + __slots__ = ['arg_values', 'name', 'decorators'] + + def __init__(self, arg_values, name=None, decorators=None): + self.arg_values = arg_values + self.name = name + self.decorators = decorators if decorators else [] + + +class parametrize(_TestParametrizer): + """ + Decorator for applying generic test parametrizations. + + The interface for this decorator is modeled after `@pytest.mark.parametrize`. + Basic usage between this decorator and pytest's is identical. The first argument + should be a string containing comma-separated names of parameters for the test, and + the second argument should be an iterable returning values or tuples of values for + the case of multiple parameters. + + Beyond this basic usage, the decorator provides some additional functionality that + pytest does not. + + 1. Parametrized tests end up as generated test functions on unittest test classes. + Since this differs from how pytest works, this decorator takes on the additional + responsibility of naming these test functions. The default test names consists of + the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"), + but custom names can be defined using `name_fn` or the `subtest` structure (see below). + + 2. The decorator specially handles parameter values of type `subtest`, which allows for + more fine-grained control over both test naming and test execution. In particular, it can + be used to tag subtests with explicit test names or apply arbitrary decorators (see examples + below). + + Examples:: + + @parametrize("x", range(5)) + def test_foo(self, x): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + def test_bar(self, x, y): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')], + name_fn=lambda x, y: '{}_{}'.format(x, y)) + def test_bar_custom_names(self, x, y): + ... + + @parametrize("x, y", [subtest((1, 2), name='double'), + subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]), + subtest((1, 4), name='quadruple')]) + def test_baz(self, x, y): + ... + + To actually instantiate the parametrized tests, one of instantiate_parametrized_tests() or + instantiate_device_type_tests() should be called. The former is intended for test classes + that contain device-agnostic tests, while the latter should be used for test classes that + contain device-specific tests. 
Both support arbitrary parametrizations using the decorator. + + Args: + arg_str (str): String of arg names separate by commas (e.g. "x,y"). + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name_fn (Callable): Optional function that takes in parameters and returns subtest name. + """ + def __init__(self, arg_str, arg_values, name_fn=None): + self.arg_names: List[str] = [s.strip() for s in arg_str.split(',') if s != ''] + self.arg_values = arg_values + self.name_fn = name_fn + + def _formatted_str_repr(self, idx, name, value): + """ Returns a string representation for the given arg that is suitable for use in test function names. """ + if isinstance(value, torch.dtype): + return dtype_name(value) + elif isinstance(value, torch.device): + return str(value) + # Can't use isinstance as it would cause a circular import + elif type(value).__name__ in {'OpInfo', 'ModuleInfo'}: + return value.formatted_name + elif isinstance(value, (int, float, str)): + return f"{name}_{str(value).replace('.', '_')}" + else: + return f"{name}{idx}" + + def _default_subtest_name(self, idx, values): + return '_'.join([self._formatted_str_repr(idx, a, v) for a, v in zip(self.arg_names, values)]) + + def _get_subtest_name(self, idx, values, explicit_name=None): + if explicit_name: + subtest_name = explicit_name + elif self.name_fn: + subtest_name = self.name_fn(*values) + else: + subtest_name = self._default_subtest_name(idx, values) + return subtest_name + + def _parametrize_test(self, test, generic_cls, device_cls): + if len(self.arg_names) == 0: + # No additional parameters needed for the test. + test_name = '' + yield (test, test_name, {}, lambda _: []) + else: + # Each "values" item is expected to be either: + # * A tuple of values with one for each arg. For a single arg, a single item is expected. + # * A subtest instance with arg_values matching the previous. + values = check_exhausted_iterator = object() + for idx, values in enumerate(self.arg_values): + maybe_name = None + + decorators = [] + if isinstance(values, subtest): + sub = values + values = sub.arg_values + maybe_name = sub.name + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorators = sub.decorators + gen_test = test_wrapper + else: + gen_test = test + + values = list(values) if len(self.arg_names) > 1 else [values] + if len(values) != len(self.arg_names): + raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' + f'values and {len(self.arg_names)} names for test "{test.__name__}"') + + param_kwargs = dict(zip(self.arg_names, values)) + + test_name = self._get_subtest_name(idx, values, explicit_name=maybe_name) + + def decorator_fn(_, decorators=decorators): + return decorators + + yield (gen_test, test_name, param_kwargs, decorator_fn) + + if values is check_exhausted_iterator: + raise ValueError(f'{test}: An empty arg_values was passed to @parametrize. ' + 'Note that this may result from reuse of a generator.') + + +class decorateIf(_TestParametrizer): + """ + Decorator for applying parameter-specific conditional decoration. + Composes with other test parametrizers (e.g. @modules, @ops, @parametrize, etc.). + + Examples:: + + @decorateIf(unittest.skip, lambda params: params["x"] == 2) + @parametrize("x", range(5)) + def test_foo(self, x): + ... 
+ + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + @decorateIf( + unittest.expectedFailure, + lambda params: params["x"] == 3 and params["y"] == "baz" + ) + def test_bar(self, x, y): + ... + + @decorateIf( + unittest.expectedFailure, + lambda params: params["op"].name == "add" and params["dtype"] == torch.float16 + ) + @ops(op_db) + def test_op_foo(self, device, dtype, op): + ... + + @decorateIf( + unittest.skip, + lambda params: params["module_info"].module_cls is torch.nn.Linear and \ + params["device"] == "cpu" + ) + @modules(module_db) + def test_module_foo(self, device, dtype, module_info): + ... + + Args: + decorator: Test decorator to apply if the predicate is satisfied. + predicate_fn (Callable): Function taking in a dict of params and returning a boolean + indicating whether the decorator should be applied or not. + """ + def __init__(self, decorator, predicate_fn): + self.decorator = decorator + self.predicate_fn = predicate_fn + + def _parametrize_test(self, test, generic_cls, device_cls): + + # Leave test as-is and return the appropriate decorator_fn. + def decorator_fn(params, decorator=self.decorator, predicate_fn=self.predicate_fn): + if predicate_fn(params): + return [decorator] + else: + return [] + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + test_name = '' + yield (test_wrapper, test_name, {}, decorator_fn) + + +class ProfilingMode(Enum): + LEGACY = 1 + SIMPLE = 2 + PROFILING = 3 + +def cppProfilingFlagsToProfilingMode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + + if old_prof_exec_state: + if old_prof_mode_state: + return ProfilingMode.PROFILING + else: + return ProfilingMode.SIMPLE + else: + return ProfilingMode.LEGACY + +@contextmanager +def enable_profiling_mode_for_profiling_tests(): + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def enable_profiling_mode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def num_profiled_runs(num_runs): + old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs) + try: + yield + finally: + torch._C._jit_set_num_profiled_runs(old_num_runs) + +func_call = torch._C.ScriptFunction.__call__ +meth_call = torch._C.ScriptMethod.__call__ + +def prof_callable(callable, *args, **kwargs): + if 'profile_and_replay' in kwargs: + del kwargs['profile_and_replay'] + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + with enable_profiling_mode_for_profiling_tests(): + callable(*args, **kwargs) + return callable(*args, **kwargs) + + return callable(*args, **kwargs) + +def prof_func_call(*args, **kwargs): + return prof_callable(func_call, *args, **kwargs) + +def prof_meth_call(*args, **kwargs): + return prof_callable(meth_call, *args, **kwargs) + +torch._C.ScriptFunction.__call__ = 
prof_func_call # type: ignore[method-assign] +torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[method-assign] + +def _get_test_report_path(): + # allow users to override the test file location. We need this + # because the distributed tests run the same test file multiple + # times with different configurations. + override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE') + test_source = override if override is not None else 'python-unittest' + return os.path.join('test-reports', test_source) + +is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "") +parser = argparse.ArgumentParser(add_help=not is_running_via_run_test, allow_abbrev=False) +parser.add_argument('--subprocess', action='store_true', + help='whether to run each test in a subprocess') +parser.add_argument('--seed', type=int, default=1234) +parser.add_argument('--accept', action='store_true') +parser.add_argument('--jit-executor', '--jit_executor', type=str) +parser.add_argument('--repeat', type=int, default=1) +parser.add_argument('--test-bailouts', '--test_bailouts', action='store_true') +parser.add_argument('--use-pytest', action='store_true') +parser.add_argument('--save-xml', nargs='?', type=str, + const=_get_test_report_path(), + default=_get_test_report_path() if IS_CI else None) +parser.add_argument('--discover-tests', action='store_true') +parser.add_argument('--log-suffix', type=str, default="") +parser.add_argument('--run-parallel', type=int, default=1) +parser.add_argument('--import-slow-tests', type=str, nargs='?', const=DEFAULT_SLOW_TESTS_FILE) +parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DEFAULT_DISABLED_TESTS_FILE) +parser.add_argument('--rerun-disabled-tests', action='store_true') +parser.add_argument('--pytest-single-test', type=str, nargs=1) +if sys.version_info >= (3, 9): + parser.add_argument('--showlocals', action=argparse.BooleanOptionalAction, default=False) +else: + parser.add_argument('--showlocals', action='store_true', default=False) + parser.add_argument('--no-showlocals', dest='showlocals', action='store_false') + +# Only run when -h or --help flag is active to display both unittest and parser help messages. 
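+# Running unittest.main() on a helper thread prints unittest's help text without ending
+# this process (its sys.exit() only terminates the thread), so the argparse help for the
+# parser above can still be displayed as well.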
+def run_unittest_help(argv): + unittest.main(argv=argv) + +if '-h' in sys.argv or '--help' in sys.argv: + help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,)) + help_thread.start() + help_thread.join() + +args, remaining = parser.parse_known_args() +if args.jit_executor == 'legacy': + GRAPH_EXECUTOR = ProfilingMode.LEGACY +elif args.jit_executor == 'profiling': + GRAPH_EXECUTOR = ProfilingMode.PROFILING +elif args.jit_executor == 'simple': + GRAPH_EXECUTOR = ProfilingMode.SIMPLE +else: + # infer flags based on the default settings + GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode() + +RERUN_DISABLED_TESTS = args.rerun_disabled_tests + +SLOW_TESTS_FILE = args.import_slow_tests +DISABLED_TESTS_FILE = args.import_disabled_tests +LOG_SUFFIX = args.log_suffix +RUN_PARALLEL = args.run_parallel +TEST_BAILOUTS = args.test_bailouts +USE_PYTEST = args.use_pytest +PYTEST_SINGLE_TEST = args.pytest_single_test +TEST_DISCOVER = args.discover_tests +TEST_IN_SUBPROCESS = args.subprocess +TEST_SAVE_XML = args.save_xml +REPEAT_COUNT = args.repeat +SEED = args.seed +SHOWLOCALS = args.showlocals +if not getattr(expecttest, "ACCEPT", False): + expecttest.ACCEPT = args.accept +UNITTEST_ARGS = [sys.argv[0]] + remaining +torch.manual_seed(SEED) + +# CI Prefix path used only on CI environment +CI_TEST_PREFIX = str(Path(os.getcwd())) +CI_PT_ROOT = str(Path(os.getcwd()).parent) +CI_FUNCTORCH_ROOT = str(os.path.join(Path(os.getcwd()).parent, "functorch")) + +def wait_for_process(p, timeout=None): + try: + return p.wait(timeout=timeout) + except KeyboardInterrupt: + # Give `p` a chance to handle KeyboardInterrupt. Without this, + # `pytest` can't print errors it collected so far upon KeyboardInterrupt. + exit_status = p.wait(timeout=5) + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except subprocess.TimeoutExpired: + # send SIGINT to give pytest a chance to make xml + p.send_signal(signal.SIGINT) + exit_status = None + try: + exit_status = p.wait(timeout=5) + # try to handle the case where p.wait(timeout=5) times out as well as + # otherwise the wait() call in the finally block can potentially hang + except subprocess.TimeoutExpired: + pass + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except: # noqa: B001,E722, copied from python core library + p.kill() + raise + finally: + # Always call p.wait() to ensure exit + p.wait() + +def shell(command, cwd=None, env=None, stdout=None, stderr=None, timeout=None): + sys.stdout.flush() + sys.stderr.flush() + # The following cool snippet is copied from Py3 core library subprocess.call + # only the with + # 1. `except KeyboardInterrupt` block added for SIGINT handling. + # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do + # `p.wait()` in a `final` block for the code to be portable. 
+ # + # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 + assert not isinstance(command, str), "Command to shell should be a list or tuple of tokens" + p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env, stdout=stdout, stderr=stderr) + return wait_for_process(p, timeout=timeout) + + +def retry_shell( + command, + cwd=None, + env=None, + stdout=None, + stderr=None, + timeout=None, + retries=1, + was_rerun=False, +) -> Tuple[int, bool]: + # Returns exicode + whether it was rerun + assert ( + retries >= 0 + ), f"Expecting non negative number for number of retries, got {retries}" + try: + exit_code = shell( + command, cwd=cwd, env=env, stdout=stdout, stderr=stderr, timeout=timeout + ) + if exit_code == 0 or retries == 0: + return exit_code, was_rerun + print( + f"Got exit code {exit_code}, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + except subprocess.TimeoutExpired: + if retries == 0: + print( + f"Command took >{timeout // 60}min, returning 124", + file=stdout, + flush=True, + ) + return 124, was_rerun + print( + f"Command took >{timeout // 60}min, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + return retry_shell( + command, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + timeout=timeout, + retries=retries - 1, + was_rerun=True, + ) + + +def discover_test_cases_recursively(suite_or_case): + if isinstance(suite_or_case, unittest.TestCase): + return [suite_or_case] + rc = [] + for element in suite_or_case: + print(element) + rc.extend(discover_test_cases_recursively(element)) + return rc + +def get_test_names(test_cases): + return ['.'.join(case.id().split('.')[-2:]) for case in test_cases] + +def _print_test_names(): + suite = unittest.TestLoader().loadTestsFromModule(__main__) + test_cases = discover_test_cases_recursively(suite) + for name in get_test_names(test_cases): + print(name) + +def chunk_list(lst, nchunks): + return [lst[i::nchunks] for i in range(nchunks)] + +# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api +def sanitize_test_filename(filename): + # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed + if filename.startswith(CI_TEST_PREFIX): + filename = filename[len(CI_TEST_PREFIX) + 1:] + strip_py = re.sub(r'.py$', '', filename) + return re.sub('/', r'.', strip_py) + +def lint_test_case_extension(suite): + succeed = True + for test_case_or_suite in suite: + test_case = test_case_or_suite + if isinstance(test_case_or_suite, unittest.TestSuite): + first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None + if first_test is not None and isinstance(first_test, unittest.TestSuite): + return succeed and lint_test_case_extension(test_case_or_suite) + test_case = first_test + + if test_case is not None: + test_class = test_case.id().split('.', 1)[1].split('.')[0] + if not isinstance(test_case, TestCase): + err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't." + print(f"{test_class} - failed. 
{err}") + succeed = False + return succeed + + +def get_report_path(argv=UNITTEST_ARGS, pytest=False): + test_filename = sanitize_test_filename(argv[0]) + test_report_path = TEST_SAVE_XML + LOG_SUFFIX + test_report_path = os.path.join(test_report_path, test_filename) + if pytest: + test_report_path = test_report_path.replace('python-unittest', 'python-pytest') + os.makedirs(test_report_path, exist_ok=True) + test_report_path = os.path.join(test_report_path, f"{test_filename}-{os.urandom(8).hex()}.xml") + return test_report_path + os.makedirs(test_report_path, exist_ok=True) + return test_report_path + + +def sanitize_pytest_xml(xml_file: str): + # pytext xml is different from unittext xml, this function makes pytest xml more similar to unittest xml + # consider somehow modifying the XML logger in conftest to do this instead + import xml.etree.ElementTree as ET + tree = ET.parse(xml_file) + for testcase in tree.iter('testcase'): + full_classname = testcase.attrib.get("classname") + if full_classname is None: + continue + # The test prefix is optional + regex_result = re.search(r"^(test\.)?(?P.*)\.(?P[^\.]*)$", full_classname) + if regex_result is None: + continue + classname = regex_result.group("classname") + file = regex_result.group("file").replace(".", "/") + testcase.set("classname", classname) + testcase.set("file", f"{file}.py") + tree.write(xml_file) + + +def get_pytest_test_cases(argv: List[str]) -> List[str]: + class TestCollectorPlugin: + def __init__(self) -> None: + self.tests = [] + + def pytest_collection_finish(self, session): + for item in session.items: + self.tests.append(session.config.cwd_relative_nodeid(item.nodeid)) + + test_collector_plugin = TestCollectorPlugin() + import pytest + pytest.main( + [arg for arg in argv if arg != '-vv'] + ['--collect-only', '-qq', '--use-main-module'], + plugins=[test_collector_plugin] + ) + return test_collector_plugin.tests + + +def run_tests(argv=UNITTEST_ARGS): + # import test files. 
+ if SLOW_TESTS_FILE: + if os.path.exists(SLOW_TESTS_FILE): + with open(SLOW_TESTS_FILE) as fp: + global slow_tests_dict + slow_tests_dict = json.load(fp) + # use env vars so pytest-xdist subprocesses can still access them + os.environ['SLOW_TESTS_FILE'] = SLOW_TESTS_FILE + else: + warnings.warn(f'slow test file provided but not found: {SLOW_TESTS_FILE}') + if DISABLED_TESTS_FILE: + if os.path.exists(DISABLED_TESTS_FILE): + with open(DISABLED_TESTS_FILE) as fp: + global disabled_tests_dict + disabled_tests_dict = json.load(fp) + os.environ['DISABLED_TESTS_FILE'] = DISABLED_TESTS_FILE + else: + warnings.warn(f'disabled test file provided but not found: {DISABLED_TESTS_FILE}') + # Determine the test launch mechanism + if TEST_DISCOVER: + _print_test_names() + return + + # Before running the tests, lint to check that every test class extends from TestCase + suite = unittest.TestLoader().loadTestsFromModule(__main__) + if not lint_test_case_extension(suite): + sys.exit(1) + + if SHOWLOCALS: + argv = [ + argv[0], + *(["--showlocals", "--tb=long", "--color=yes"] if USE_PYTEST else ["--locals"]), + *argv[1:], + ] + + if TEST_IN_SUBPROCESS: + other_args = [] + if DISABLED_TESTS_FILE: + other_args.append("--import-disabled-tests") + if SLOW_TESTS_FILE: + other_args.append("--import-slow-tests") + if USE_PYTEST: + other_args.append("--use-pytest") + if RERUN_DISABLED_TESTS: + other_args.append("--rerun-disabled-tests") + if TEST_SAVE_XML: + other_args += ['--save-xml', args.save_xml] + + test_cases = ( + get_pytest_test_cases(argv) if USE_PYTEST else + [case.id().split('.', 1)[1] for case in discover_test_cases_recursively(suite)] + ) + + failed_tests = [] + + for test_case_full_name in test_cases: + + cmd = ( + [sys.executable] + [argv[0]] + other_args + argv[1:] + + (["--pytest-single-test"] if USE_PYTEST else []) + + [test_case_full_name] + ) + string_cmd = " ".join(cmd) + + timeout = None if RERUN_DISABLED_TESTS else 15 * 60 + + exitcode, _ = retry_shell(cmd, timeout=timeout, retries=0 if RERUN_DISABLED_TESTS else 1) + + if exitcode != 0: + # This is sort of hacky, but add on relevant env variables for distributed tests. + if 'TestDistBackendWithSpawn' in test_case_full_name: + backend = os.environ.get("BACKEND", "") + world_size = os.environ.get("WORLD_SIZE", "") + env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}" + string_cmd = env_prefix + " " + string_cmd + # Log the command to reproduce the failure. + print(f"Test exited with non-zero exitcode {exitcode}. 
Command to reproduce: {string_cmd}") + failed_tests.append(test_case_full_name) + + assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format( + len(failed_tests), '\n\t'.join(failed_tests)) + + elif RUN_PARALLEL > 1: + test_cases = discover_test_cases_recursively(suite) + test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL) + processes = [] + for i in range(RUN_PARALLEL): + command = [sys.executable] + argv + [f'--log-suffix=-shard-{i + 1}'] + test_batches[i] + processes.append(subprocess.Popen(command, universal_newlines=True)) + failed = False + for p in processes: + failed |= wait_for_process(p) != 0 + assert not failed, "Some test shards have failed" + elif USE_PYTEST: + pytest_args = argv + ["--use-main-module"] + if TEST_SAVE_XML: + test_report_path = get_report_path(pytest=True) + print(f'Test results will be stored in {test_report_path}') + pytest_args.append(f'--junit-xml-reruns={test_report_path}') + if PYTEST_SINGLE_TEST: + pytest_args = PYTEST_SINGLE_TEST + pytest_args[1:] + + import pytest + os.environ["NO_COLOR"] = "1" + exit_code = pytest.main(args=pytest_args) + if TEST_SAVE_XML: + sanitize_pytest_xml(test_report_path) + + if not RERUN_DISABLED_TESTS: + # exitcode of 5 means no tests were found, which happens since some test configs don't + # run tests from certain files + sys.exit(0 if exit_code == 5 else exit_code) + else: + # Only record the test report and always return a success code when running under rerun + # disabled tests mode + sys.exit(0) + elif TEST_SAVE_XML is not None: + # import here so that non-CI doesn't need xmlrunner installed + import xmlrunner # type: ignore[import] + from xmlrunner.result import _XMLTestResult # type: ignore[import] + + class XMLTestResultVerbose(_XMLTestResult): + """ + Adding verbosity to test outputs: + by default test summary prints 'skip', + but we want to also print the skip reason. 
+ GH issue: https://github.com/pytorch/pytorch/issues/69014 + + This works with unittest_xml_reporting<=3.2.0,>=2.0.0 + (3.2.0 is latest at the moment) + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def addSkip(self, test, reason): + super().addSkip(test, reason) + for c in self.callback.__closure__: + if isinstance(c.cell_contents, str) and c.cell_contents == 'skip': + # this message is printed in test summary; + # it stands for `verbose_str` captured in the closure + c.cell_contents = f"skip: {reason}" + + def printErrors(self) -> None: + super().printErrors() + self.printErrorList("XPASS", self.unexpectedSuccesses) + test_report_path = get_report_path() + verbose = '--verbose' in argv or '-v' in argv + if verbose: + print(f'Test results will be stored in {test_report_path}') + unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner( + output=test_report_path, + verbosity=2 if verbose else 1, + resultclass=XMLTestResultVerbose)) + elif REPEAT_COUNT > 1: + for _ in range(REPEAT_COUNT): + if not unittest.main(exit=False, argv=argv).result.wasSuccessful(): + sys.exit(-1) + else: + unittest.main(argv=argv) + +IS_LINUX = sys.platform == "linux" +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" +IS_PPC = platform.machine() == "ppc64le" +IS_X86 = platform.machine() in ('x86_64', 'i386') +IS_ARM64 = platform.machine() in ('arm64', 'aarch64') + +def is_avx512_vnni_supported(): + if sys.platform != 'linux': + return False + with open("/proc/cpuinfo", encoding="ascii") as f: + lines = f.read() + return "vnni" in lines + +IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported() + +if IS_WINDOWS: + @contextmanager + def TemporaryFileName(*args, **kwargs): + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. 
To support Windows, + # close the file after creation and try to remove it manually + if 'delete' in kwargs: + if kwargs['delete'] is not False: + raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.") + else: + kwargs['delete'] = False + f = tempfile.NamedTemporaryFile(*args, **kwargs) + try: + f.close() + yield f.name + finally: + os.unlink(f.name) +else: + @contextmanager # noqa: T484 + def TemporaryFileName(*args, **kwargs): + with tempfile.NamedTemporaryFile(*args, **kwargs) as f: + yield f.name + +if IS_WINDOWS: + @contextmanager + def TemporaryDirectoryName(suffix=None): + # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely, + # so we first create the directory using mkdtemp and then remove it manually + try: + dir_name = tempfile.mkdtemp(suffix=suffix) + yield dir_name + finally: + shutil.rmtree(dir_name) +else: + @contextmanager # noqa: T484 + def TemporaryDirectoryName(suffix=None): + with tempfile.TemporaryDirectory(suffix=suffix) as d: + yield d + + +def is_privateuse1_backend_available(): + privateuse1_backend_name = torch._C._get_privateuse1_backend_name() + privateuse1_backend_module = getattr(torch, privateuse1_backend_name, None) + return hasattr(privateuse1_backend_module, "is_available") and privateuse1_backend_module.is_available() + + +IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8' + +TEST_NUMPY = _check_module_exists('numpy') +TEST_FAIRSEQ = _check_module_exists('fairseq') +TEST_SCIPY = _check_module_exists('scipy') +TEST_MKL = torch.backends.mkl.is_available() +TEST_MPS = torch.backends.mps.is_available() +TEST_XPU = torch.xpu.is_available() +TEST_HPU = True if (hasattr(torch, "hpu") and torch.hpu.is_available()) else False +TEST_CUDA = torch.cuda.is_available() +custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None) +TEST_PRIVATEUSE1 = is_privateuse1_backend_available() +TEST_PRIVATEUSE1_DEVICE_TYPE = torch._C._get_privateuse1_backend_name() +TEST_NUMBA = _check_module_exists('numba') +TEST_TRANSFORMERS = _check_module_exists('transformers') +TEST_DILL = _check_module_exists('dill') + +TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64 + +TEST_OPT_EINSUM = _check_module_exists('opt_einsum') + +TEST_Z3 = _check_module_exists('z3') + +def split_if_not_empty(x: str): + return x.split(",") if len(x) != 0 else [] + +NOTEST_CPU = "cpu" in split_if_not_empty(os.getenv('PYTORCH_TESTING_DEVICE_EXCEPT_FOR', '')) + +skipIfNoDill = unittest.skipIf(not TEST_DILL, "no dill") + + +# Python 2.7 doesn't have spawn +NO_MULTIPROCESSING_SPAWN: bool = TestEnvironment.def_flag( + "NO_MULTIPROCESSING_SPAWN", + env_var="NO_MULTIPROCESSING_SPAWN", +) +TEST_WITH_ASAN: bool = TestEnvironment.def_flag( + "TEST_WITH_ASAN", + env_var="PYTORCH_TEST_WITH_ASAN", +) +TEST_WITH_DEV_DBG_ASAN: bool = TestEnvironment.def_flag( + "TEST_WITH_DEV_DBG_ASAN", + env_var="PYTORCH_TEST_WITH_DEV_DBG_ASAN", +) +TEST_WITH_TSAN: bool = TestEnvironment.def_flag( + "TEST_WITH_TSAN", + env_var="PYTORCH_TEST_WITH_TSAN", +) +TEST_WITH_UBSAN: bool = TestEnvironment.def_flag( + "TEST_WITH_UBSAN", + env_var="PYTORCH_TEST_WITH_UBSAN", +) +TEST_WITH_ROCM: bool = TestEnvironment.def_flag( + "TEST_WITH_ROCM", + env_var="PYTORCH_TEST_WITH_ROCM", +) + +# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen +# See #64427 +TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1' +# Enables tests that are slow to run (disabled by default) 
+TEST_WITH_SLOW: bool = TestEnvironment.def_flag( + "TEST_WITH_SLOW", + env_var="PYTORCH_TEST_WITH_SLOW", +) + +# Disables non-slow tests (these tests enabled by default) +# This is usually used in conjunction with TEST_WITH_SLOW to +# run *only* slow tests. (I could have done an enum, but +# it felt a little awkward. +TEST_SKIP_FAST: bool = TestEnvironment.def_flag( + "TEST_SKIP_FAST", + env_var="PYTORCH_TEST_SKIP_FAST", +) + +# Enables crossref tests, in addition to standard tests which +# are being run. crossref tests work by installing a torch +# function mode that runs extra compute alongside the regular +# computation that happens with the test. After both computations +# are done, we cross-reference them (thus the name) to check for +# correction, before throwing out the extra compute and proceeding +# as we had before. By default, we don't run these tests. +TEST_WITH_CROSSREF: bool = TestEnvironment.def_flag( + "TEST_WITH_CROSSREF", + env_var="PYTORCH_TEST_WITH_CROSSREF", +) + +TEST_SKIP_CUDAGRAPH: bool = TestEnvironment.def_flag( + "TEST_SKIP_CUDAGRAPH", + env_var="PYTORCH_TEST_SKIP_CUDAGRAPH", +) +TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and ( + (torch.version.cuda and int(torch.version.cuda.split(".")[0]) >= 11) or + (torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3) +) + +TEST_CUDA_CUDSS = TEST_CUDA and (torch.version.cuda and int(torch.version.cuda.split(".")[0]) >= 12) + +def allocator_option_enabled_fn(allocator_config, _, option): + if allocator_config is None: + return False + allocator_config = allocator_config.split(',') if ',' in allocator_config else [allocator_config] + mapping = dict([var.split(':') for var in allocator_config]) + + if option in mapping and mapping[option] == 'True': + return True + else: + return False + +EXPANDABLE_SEGMENTS: bool = TestEnvironment.def_flag( + "EXPANDABLE_SEGMENTS", + env_var="PYTORCH_CUDA_ALLOC_CONF", + enabled_fn=functools.partial(allocator_option_enabled_fn, option='expandable_segments'), +) + +if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ: + num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2")) + gb_available = torch.cuda.mem_get_info()[1] / 2 ** 30 + # other libraries take up about a little under 1 GB of space per process + torch.cuda.set_per_process_memory_fraction(round((gb_available - num_procs * .85) / gb_available / num_procs, 2)) + +requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "Requires CUDA") + +def skipIfCrossRef(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_CROSSREF: + raise unittest.SkipTest("test doesn't currently with crossref") + else: + fn(*args, **kwargs) + return wrapper + +class CrossRefMode(torch.overrides.TorchFunctionMode): + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + r = func(*args, **kwargs) + return r + +# Run PyTorch tests with TorchDynamo +TEST_WITH_TORCHINDUCTOR: bool = TestEnvironment.def_flag( + "TEST_WITH_TORCHINDUCTOR", + env_var="PYTORCH_TEST_WITH_INDUCTOR", +) +# AOT_EAGER not tested in ci, useful for debugging +TEST_WITH_AOT_EAGER: bool = TestEnvironment.def_flag( + "TEST_WITH_AOT_EAGER", + env_var="PYTORCH_TEST_WITH_AOT_EAGER", +) +TEST_WITH_TORCHDYNAMO: bool = TestEnvironment.def_flag( + "TEST_WITH_TORCHDYNAMO", + env_var="PYTORCH_TEST_WITH_DYNAMO", + implied_by_fn=lambda: TEST_WITH_TORCHINDUCTOR or TEST_WITH_AOT_EAGER, +) + +if TEST_WITH_TORCHDYNAMO: + import torch._dynamo + # Do not spend time on helper functions that are called with different inputs + 
torch._dynamo.config.accumulated_cache_size_limit = 64 + # Do not log compilation metrics from unit tests + torch._dynamo.config.log_compilation_metrics = False + if TEST_WITH_TORCHINDUCTOR: + import torch._inductor.config + torch._inductor.config.fallback_random = True + + +def xpassIfTorchDynamo(func): + return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func) + + +def xfailIfTorchDynamo(func): + return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func + + +def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"): + """ + Usage: + @skipIfTorchDynamo(msg) + def test_blah(self): + ... + """ + assert isinstance(msg, str), "Are you using skipIfTorchDynamo correctly?" + + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_TORCHDYNAMO: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if TEST_WITH_TORCHDYNAMO: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + +def skipIfTorchInductor(msg="test doesn't currently work with torchinductor", + condition=TEST_WITH_TORCHINDUCTOR): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if condition: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if condition: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + +def serialTest(condition=True): + """ + Decorator for running tests serially. Requires pytest + """ + def decorator(fn): + if has_pytest and condition: + return pytest.mark.serial(fn) + return fn + return decorator + +def unMarkDynamoStrictTest(cls=None): + def decorator(cls): + cls.dynamo_strict = False + return cls + + if cls is None: + return decorator + else: + return decorator(cls) + + +def markDynamoStrictTest(cls_or_func=None, nopython=False): + """ + Marks the test as 'strict'. In strict mode, we reset before and after the + test, and run without suppress errors. + + Args: + - nopython: if we should run torch._dynamo.optimize with nopython={True/False}. + """ + def decorator(cls_or_func): + if inspect.isclass(cls_or_func): + cls_or_func.dynamo_strict = True + cls_or_func.dynamo_strict_nopython = nopython + return cls_or_func + + fn = cls_or_func + + @wraps(fn) + def wrapper(*args, **kwargs): + torch._dynamo.reset() + with unittest.mock.patch("torch._dynamo.config.suppress_errors", False): + fn(*args, **kwargs) + torch._dynamo.reset() + return wrapper + + if cls_or_func is None: + return decorator + else: + return decorator(cls_or_func) + + +def skipRocmIfTorchInductor(msg="test doesn't currently work with torchinductor on the ROCm stack"): + return skipIfTorchInductor(msg=msg, condition=TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR) + +def skipIfLegacyJitExecutor(msg="test doesn't currently work with legacy JIT executor"): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert isinstance(fn, type) + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + + return decorator + + +# Run PyTorch tests with translation validation on. 
+TEST_WITH_TV = os.getenv('PYTORCH_TEST_WITH_TV') == '1' + +if TEST_WITH_TV: + torch.fx.experimental._config.translation_validation = True + +# Some tests take too long when dynamic_shapes is combined with +# translation_validation. Whenever that happens, we solve that by +# disabling translation_validation. +def disable_translation_validation_if_dynamic_shapes(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if torch._dynamo.config.dynamic_shapes: + # Turning TV off due to high latency on dynamic shapes. + torch.fx.experimental._config.translation_validation = False + return fn(*args, **kwargs) + return wrapper + + +# Determine whether to enable cuda memory leak check. +# CUDA mem leak check is expensive and thus we don't want to execute it on every +# test case / configuration. +# If this is True then CUDA memory leak checks are skipped. If this is false +# then CUDA memory leak checks are performed. +# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135 +TEST_CUDA_MEM_LEAK_CHECK: bool = TestEnvironment.def_flag( + "TEST_CUDA_MEM_LEAK_CHECK", + env_var="PYTORCH_TEST_CUDA_MEM_LEAK_CHECK", +) + + +# Dict of NumPy dtype -> torch dtype (when the correspondence exists) +numpy_to_torch_dtype_dict = { + np.bool_ : torch.bool, + np.uint8 : torch.uint8, + np.uint16 : torch.uint16, + np.uint32 : torch.uint32, + np.uint64 : torch.uint64, + np.int8 : torch.int8, + np.int16 : torch.int16, + np.int32 : torch.int32, + np.int64 : torch.int64, + np.float16 : torch.float16, + np.float32 : torch.float32, + np.float64 : torch.float64, + np.complex64 : torch.complex64, + np.complex128 : torch.complex128 +} + + +# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like +# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type. +# Especially when checking against a reference we can't be sure which variant we get, so we simply try both. +def numpy_to_torch_dtype(np_dtype): + try: + return numpy_to_torch_dtype_dict[np_dtype] + except KeyError: + return numpy_to_torch_dtype_dict[np_dtype.type] + + +def has_corresponding_torch_dtype(np_dtype): + try: + numpy_to_torch_dtype(np_dtype) + return True + except KeyError: + return False + + +if IS_WINDOWS: + # Size of `np.intc` is platform defined. + # It is returned by functions like `bitwise_not`. 
+ # On Windows `int` is 32-bit + # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160 + numpy_to_torch_dtype_dict[np.intc] = torch.int + +# Dict of torch dtype -> NumPy dtype +torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()} +torch_to_numpy_dtype_dict.update({ + torch.bfloat16: np.float32, + torch.complex32: np.complex64 +}) + +def skipIfNNModuleInlined( + msg="test doesn't currently work with nn module inlining", + condition=torch._dynamo.config.inline_inbuilt_nn_modules, +): + def decorator(fn): + if not isinstance(fn, type): + + @wraps(fn) + def wrapper(*args, **kwargs): + if condition: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + + return wrapper + + assert isinstance(fn, type) + if condition: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + +def skipIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): + def dec_fn(fn): + reason = f"skipIfRocm: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +def runOnRocm(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: + fn(*args, **kwargs) + else: + raise unittest.SkipTest("test currently only works on the ROCm stack") + return wrapper + +def runOnRocmArch(arch: Tuple[str, ...]): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if TEST_WITH_ROCM: + prop = torch.cuda.get_device_properties(0) + if prop.gcnArchName.split(":")[0] not in arch: + reason = f"skipIfRocm: test only runs on {arch}" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + return wrap_fn + return dec_fn + +def skipIfXpu(func=None, *, msg="test doesn't currently work on the XPU stack"): + def dec_fn(fn): + reason = f"skipIfXpu: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_XPU: + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +def skipIfMps(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_MPS: + raise unittest.SkipTest("test doesn't currently work with MPS") + else: + fn(*args, **kwargs) + return wrapper + +def skipIfHpu(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_HPU: + raise unittest.SkipTest("test doesn't currently work with HPU") + else: + fn(*args, **kwargs) + return wrapper + +# Skips a test on CUDA if ROCm is available and its version is lower than requested. 
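+# Illustrative usage (a sketch, not taken from an actual test; the version tuple is
+# whatever minimum ROCm release the test needs):
+#
+#   @skipIfRocmVersionLessThan((5, 3))
+#   def test_requires_recent_rocm(self):
+#       ...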
+def skipIfRocmVersionLessThan(version=None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if TEST_WITH_ROCM: + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) + if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): + reason = f"ROCm {rocm_version_tuple} is available but {version} required" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + return wrap_fn + return dec_fn + +def skipIfNotMiopenSuggestNHWC(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_MIOPEN_SUGGEST_NHWC: + raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation") + else: + fn(*args, **kwargs) + return wrapper + +def skipIfWindows(func=None, *, msg="test doesn't currently work on the Windows stack"): + def dec_fn(fn): + reason = f"skipIfWindows: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if IS_WINDOWS: # noqa: F821 + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +# Reverts the linalg backend back to default to make sure potential failures in one +# test do not affect other tests +def setLinalgBackendsToDefaultFinally(fn): + @wraps(fn) + def _fn(*args, **kwargs): + _preferred_backend = torch.backends.cuda.preferred_linalg_library() + try: + fn(*args, **kwargs) + finally: + torch.backends.cuda.preferred_linalg_library(_preferred_backend) + return _fn + + +# Reverts the blas backend back to default to make sure potential failures in one +# test do not affect other tests +def setBlasBackendsToDefaultFinally(fn): + @wraps(fn) + def _fn(*args, **kwargs): + _preferred_backend = torch.backends.cuda.preferred_blas_library() + try: + fn(*args, **kwargs) + finally: + torch.backends.cuda.preferred_blas_library(_preferred_backend) + return _fn + + +# Context manager for setting deterministic flag and automatically +# resetting it to its original value +class DeterministicGuard: + def __init__(self, deterministic, *, warn_only=False, fill_uninitialized_memory=True): + self.deterministic = deterministic + self.warn_only = warn_only + self.fill_uninitialized_memory = fill_uninitialized_memory + + def __enter__(self): + self.deterministic_restore = torch.are_deterministic_algorithms_enabled() + self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled() + self.fill_uninitialized_memory_restore = torch.utils.deterministic.fill_uninitialized_memory + torch.use_deterministic_algorithms( + self.deterministic, + warn_only=self.warn_only) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory + + def __exit__(self, exception_type, exception_value, traceback): + torch.use_deterministic_algorithms( + self.deterministic_restore, + warn_only=self.warn_only_restore) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory_restore + +class AlwaysWarnTypedStorageRemoval: + def __init__(self, always_warn): + assert isinstance(always_warn, bool) + self.always_warn = always_warn + + def __enter__(self): + self.always_warn_restore = torch.storage._get_always_warn_typed_storage_removal() + torch.storage._set_always_warn_typed_storage_removal(self.always_warn) + + def __exit__(self, exception_type, exception_value, traceback): + torch.storage._set_always_warn_typed_storage_removal(self.always_warn_restore) + +# Context manager for 
setting cuda sync debug mode and reset it +# to original value +# we are not exposing it to the core because sync debug mode is +# global and thus not thread safe +class CudaSyncGuard: + def __init__(self, sync_debug_mode): + self.mode = sync_debug_mode + + def __enter__(self): + self.debug_mode_restore = torch.cuda.get_sync_debug_mode() + torch.cuda.set_sync_debug_mode(self.mode) + + def __exit__(self, exception_type, exception_value, traceback): + torch.cuda.set_sync_debug_mode(self.debug_mode_restore) + +# Context manager for setting torch.__future__.set_swap_module_params_on_conversion +# and automatically resetting it to its original value +class SwapTensorsGuard: + def __init__(self, use_swap_tensors): + self.use_swap_tensors = use_swap_tensors + + def __enter__(self): + self.swap_tensors_restore = torch.__future__.get_swap_module_params_on_conversion() + if self.use_swap_tensors is not None: + torch.__future__.set_swap_module_params_on_conversion(self.use_swap_tensors) + + def __exit__(self, exception_type, exception_value, traceback): + torch.__future__.set_swap_module_params_on_conversion(self.swap_tensors_restore) + +# This decorator can be used for API tests that call +# torch.use_deterministic_algorithms(). When the test is finished, it will +# restore the previous deterministic flag setting. +# +# If CUDA >= 10.2, this will set the environment variable +# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that +# setting is not thrown during the test unless the test changes that variable +# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be +# restored once the test is finished. +# +# Note that if a test requires CUDA to actually register the changed +# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because +# CUDA only checks the variable when the runtime initializes. Tests can be +# run inside a subprocess like so: +# +# import subprocess, sys, os +# script = ''' +# # Test code should go here +# ''' +# try: +# subprocess.check_output( +# [sys.executable, '-c', script], +# stderr=subprocess.STDOUT, +# cwd=os.path.dirname(os.path.realpath(__file__)), +# env=os.environ.copy()) +# except subprocess.CalledProcessError as e: +# error_message = e.output.decode('utf-8') +# # Handle exceptions raised by the subprocess here +# +def wrapDeterministicFlagAPITest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with DeterministicGuard( + torch.are_deterministic_algorithms_enabled(), + warn_only=torch.is_deterministic_algorithms_warn_only_enabled()): + class CuBLASConfigGuard: + cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' + + def __enter__(self): + self.is_cuda10_2_or_higher = ( + (torch.version.cuda is not None) + and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) + if self.is_cuda10_2_or_higher: + self.cublas_config_restore = os.environ.get(self.cublas_var_name) + os.environ[self.cublas_var_name] = ':4096:8' + + def __exit__(self, exception_type, exception_value, traceback): + if self.is_cuda10_2_or_higher: + cur_cublas_config = os.environ.get(self.cublas_var_name) + if self.cublas_config_restore is None: + if cur_cublas_config is not None: + del os.environ[self.cublas_var_name] + else: + os.environ[self.cublas_var_name] = self.cublas_config_restore + with CuBLASConfigGuard(): + fn(*args, **kwargs) + return wrapper + +# This decorator can be used for API tests that want to safely call +# torch.__future__.set_swap_module_params_on_conversion. 
`swap` can be set to +# True, False or None where None indicates that the context manager does not +# set the flag. When the test is finished, it will restore the previous swap +# flag setting. +def wrapSwapTensorsTest(swap=None): + def dec_fn(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with SwapTensorsGuard(swap): + fn(*args, **kwargs) + return wrapper + return dec_fn + +# test parametrizer for swapping +class swap(_TestParametrizer): + def __init__(self, swap_values): + super().__init__() + self.swap_values = swap_values + + def _parametrize_test(self, test, generic_cls, device_cls): + for swap in self.swap_values: + yield wrapSwapTensorsTest(swap)(test), f'swap_{swap}', {}, lambda _: [] + +def skipIfCompiledWithoutNumpy(fn): + # Even if the numpy module is present, if `USE_NUMPY=0` is used during the + # build, numpy tests will fail + numpy_support = TEST_NUMPY + if numpy_support: + try: + # The numpy module is present, verify that PyTorch is compiled with + # numpy support + torch.from_numpy(np.array([2, 2])) + except RuntimeError: + numpy_support = False + + @wraps(fn) + def wrapper(*args, **kwargs): + if not numpy_support: + raise unittest.SkipTest("PyTorch was compiled without numpy support") + else: + fn(*args, **kwargs) + return wrapper + +def _test_function(fn, device): + def run_test_function(self): + return fn(self, device) + return run_test_function + +def skipIfNoXNNPACK(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch.backends.xnnpack.enabled: + raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoLapack(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch._C.has_lapack: + raise unittest.SkipTest('PyTorch compiled without Lapack') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNotRegistered(op_name, message): + """Wraps the decorator to hide the import of the `core`. + + Args: + op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`. + message: message to fail with. 
+ + Usage: + @skipIfNotRegistered('MyOp', 'MyOp is not linked!') + This will check if 'MyOp' is in the caffe2.python.core + """ + return unittest.skip("Pytorch is compiled without Caffe2") + +def skipIfNoSciPy(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_SCIPY: + raise unittest.SkipTest("test require SciPy, but SciPy not found") + else: + fn(*args, **kwargs) + return wrapper + +def skip_if_pytest(fn): + @wraps(fn) + def wrapped(*args, **kwargs): + if "PYTEST_CURRENT_TEST" in os.environ: + raise unittest.SkipTest("does not work under pytest") + return fn(*args, **kwargs) + + return wrapped + + +def slowTest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_SLOW: + raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") + else: + fn(*args, **kwargs) + wrapper.__dict__['slow_test'] = True + return wrapper + + +def slowTestIf(condition): + return slowTest if condition else lambda fn: fn + + +def skipCUDAMemoryLeakCheckIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True + fn._do_cuda_memory_leak_check = not condition + return fn + return dec + +def skipCUDANonDefaultStreamIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_non_default_stream', True): # if current True + fn._do_cuda_non_default_stream = not condition + return fn + return dec + +def suppress_warnings(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fn(*args, **kwargs) + return wrapper + + +def to_gpu(obj, type_map=None): + if type_map is None: + type_map = {} + if isinstance(obj, torch.Tensor): + assert obj.is_leaf + t = type_map.get(obj.dtype, obj.dtype) + with torch.no_grad(): + res = obj.clone().to(dtype=t, device="cuda") + res.requires_grad = obj.requires_grad + return res + elif torch.is_storage(obj): + return obj.new().resize_(obj.size()).copy_(obj) + elif isinstance(obj, list): + return [to_gpu(o, type_map) for o in obj] + elif isinstance(obj, tuple): + return tuple(to_gpu(o, type_map) for o in obj) + else: + return deepcopy(obj) + + +def get_function_arglist(func): + return inspect.getfullargspec(func).args + + +def set_rng_seed(seed): + torch.manual_seed(seed) + random.seed(seed) + if TEST_NUMPY: + np.random.seed(seed) + + +@contextlib.contextmanager +def set_default_dtype(dtype): + saved_dtype = torch.get_default_dtype() + torch.set_default_dtype(dtype) + try: + yield + finally: + torch.set_default_dtype(saved_dtype) + +@contextlib.contextmanager +def set_default_tensor_type(tensor_type): + saved_tensor_type = torch.tensor([]).type() + torch.set_default_tensor_type(tensor_type) + try: + yield + finally: + torch.set_default_tensor_type(saved_tensor_type) + +def iter_indices(tensor): + if tensor.dim() == 0: + return range(0) + if tensor.dim() == 1: + return range(tensor.size(0)) + return product(*(range(s) for s in tensor.size())) + + +def is_iterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +def is_iterable_of_tensors(iterable, include_empty=False): + """ Returns True if iterable is an iterable of tensors and False o.w. 
+ + If the iterable is empty, the return value is :attr:`include_empty` + """ + # Tensor itself is iterable so we check this first + if isinstance(iterable, torch.Tensor): + return False + + try: + if len(iterable) == 0: + return include_empty + + for t in iter(iterable): + if not isinstance(t, torch.Tensor): + return False + + except TypeError as te: + return False + + return True + + +class CudaNonDefaultStream: + def __enter__(self): + # Before starting CUDA test save currently active streams on all + # CUDA devices and set new non default streams to all CUDA devices + # to ensure CUDA tests do not use default stream by mistake. + beforeDevice = torch.cuda.current_device() + self.beforeStreams = [] + for d in range(torch.cuda.device_count()): + self.beforeStreams.append(torch.cuda.current_stream(d)) + deviceStream = torch.cuda.Stream(device=d) + self.beforeStreams[-1].synchronize() + torch._C._cuda_setStream(stream_id=deviceStream.stream_id, + device_index=deviceStream.device_index, + device_type=deviceStream.device_type) + torch._C._cuda_setDevice(beforeDevice) + + def __exit__(self, exec_type, exec_value, traceback): + # After completing CUDA test load previously active streams on all + # CUDA devices. + beforeDevice = torch.cuda.current_device() + for d in range(torch.cuda.device_count()): + torch._C._cuda_setStream(stream_id=self.beforeStreams[d].stream_id, + device_index=self.beforeStreams[d].device_index, + device_type=self.beforeStreams[d].device_type) + torch._C._cuda_setDevice(beforeDevice) + +class CudaMemoryLeakCheck: + def __init__(self, testcase, name=None): + self.name = testcase.id() if name is None else name + self.testcase = testcase + + # initialize context & RNG to prevent false positive detections + # when the test is the first to initialize those + from torch.testing._internal.common_cuda import initialize_cuda_context_rng + initialize_cuda_context_rng() + + # Stores CUDA memory data provided by PyTorch's caching allocator and + # the CUDA driver. + # + # NOTE: The undocumented torch.cuda.mem_get_info() returns + # (#free bytes, #total bytes available) on the GPU + def __enter__(self): + self.caching_allocator_befores = [] + self.driver_befores = [] + + # Performs a gc if required (required if any CUDA memory is held) + num_devices = torch.cuda.device_count() + for i in range(num_devices): + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + # NOTE: gc is based exclusively on caching allocator memory + # because the driver will always have some bytes in use (context size?) 
+            if caching_allocator_mem_allocated > 0:
+                gc.collect()
+                torch._C._cuda_clearCublasWorkspaces()
+                torch.cuda.empty_cache()
+                break
+
+        # Acquires caching allocator and driver statistics before the test is run
+        for i in range(num_devices):
+            self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
+            bytes_free, bytes_total = torch.cuda.mem_get_info(i)
+            driver_mem_allocated = bytes_total - bytes_free
+            self.driver_befores.append(driver_mem_allocated)
+
+    def __exit__(self, exec_type, exec_value, traceback):
+        # Don't check for leaks if an exception was thrown
+        if exec_type is not None:
+            return
+
+        # Compares caching allocator before/after statistics
+        # An increase in allocated memory is a discrepancy indicating a possible
+        # memory leak
+        discrepancy_detected = False
+        num_devices = torch.cuda.device_count()
+        for i in range(num_devices):
+            # avoid counting cublasWorkspace allocations
+            torch._C._cuda_clearCublasWorkspaces()
+            caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
+
+            if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
+                discrepancy_detected = True
+                break
+
+        # Short-circuits if no discrepancy detected
+        if not discrepancy_detected:
+            return
+
+        # Validates the discrepancy persists after garbage collection and
+        # is confirmed by the driver API
+
+        # NOTE: driver API discrepancies alone are ignored because with the jiterator
+        # some tests may permanently increase the CUDA context size and
+        # that will appear as a driver memory leak but is the expected behavior.
+
+        # GCs and clears the cache
+        gc.collect()
+        torch.cuda.empty_cache()
+
+        for i in range(num_devices):
+
+            discrepancy_detected = True
+
+            # Query memory multiple times to ensure the leak was not transient
+            for n in range(3):
+                caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
+                bytes_free, bytes_total = torch.cuda.mem_get_info(i)
+                driver_mem_allocated = bytes_total - bytes_free
+
+                caching_allocator_discrepancy = False
+                driver_discrepancy = False
+
+                if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
+                    caching_allocator_discrepancy = True
+
+                if driver_mem_allocated > self.driver_befores[i]:
+                    driver_discrepancy = True
+
+                if not (caching_allocator_discrepancy or driver_discrepancy):
+                    # Leak was a false positive, exit loop
+                    discrepancy_detected = False
+                    break
+
+            if not discrepancy_detected:
+                continue
+
+            if caching_allocator_discrepancy and not driver_discrepancy:
+                # Just raises a warning if the leak is not validated by the
+                # driver API
+                # NOTE: this may be a problem with how the caching allocator collects its
+                # statistics or a leak too small to trigger the allocation of an
+                # additional block of memory by the CUDA driver
+                msg = ("CUDA caching allocator reports a memory leak not "
+                       f"verified by the driver API in {self.name}! "
+                       f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} "
+                       f"and is now reported as {caching_allocator_mem_allocated} "
+                       f"on device {i}. "
+                       f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.")
+                warnings.warn(msg)
+            elif caching_allocator_discrepancy and driver_discrepancy:
+                # A caching allocator discrepancy validated by the driver API is a
+                # failure (except on ROCm, see below)
+                msg = (f"CUDA driver API confirmed a leak in {self.name}! "
+                       f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} "
+                       f"and is now reported as {caching_allocator_mem_allocated} "
+                       f"on device {i}. 
" + f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.") + + raise RuntimeError(msg) + +@contextmanager +def skip_exception_type(exc_type): + try: + yield + except exc_type as e: + raise unittest.SkipTest(f"not implemented: {e}") from e + +@contextmanager +def print_repro_on_failure(repro_parts): + try: + yield + except unittest.SkipTest: + raise + except Exception as e: + # Get the index of the sample input that failed the test if possible. + sample_isolation_prefix = "" + tracked_input = getattr(e, "_tracked_input", None) + if tracked_input is not None: + sample_isolation_prefix = f"PYTORCH_OPINFO_SAMPLE_INPUT_INDEX={tracked_input.index}" + + repro_str = " ".join(filter(None, (sample_isolation_prefix, *repro_parts))) + repro_msg = f""" +To execute this test, run the following from the base repo dir: + {repro_str} + +This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0""" + + # NB: Hacking the exception args is the cleanest way I've found to append + # failure reproduction info without poisoning the stack trace. + if len(e.args) >= 1: + e.args = (f"{e.args[0]}\n{repro_msg}", *e.args[1:]) + raise + +# "min_satisfying_examples" setting has been deprecated in hypothesis +# 3.56.0 and removed in hypothesis 4.x +try: + import hypothesis + + def settings(*args, **kwargs): + if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0): + kwargs.pop('min_satisfying_examples') + return hypothesis.settings(*args, **kwargs) + + + hypothesis.settings.register_profile( + "pytorch_ci", + settings( + derandomize=True, + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=50, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "dev", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=10, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "debug", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=1000, + verbosity=hypothesis.Verbosity.verbose)) + + hypothesis.settings.load_profile( + "pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev') + ) +except ImportError: + print('Fail to import hypothesis in common_utils, tests are not derandomized') + +# Used in check_if_enable to see if a test method should be disabled by an issue, +# sanitizes a test method name from appended suffixes by @dtypes parametrization. 
+# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should
+# disable ALL parametrized test_bitwise_ops tests, such as test_bitwise_ops_cuda_int32
+def remove_device_and_dtype_suffixes(test_name: str) -> str:
+    # import statement is localized to avoid circular dependency issues with common_device_type.py
+    from torch.testing._internal.common_device_type import get_device_type_test_bases
+    device_suffixes = [x.device_type for x in get_device_type_test_bases()]
+    dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()]
+
+    test_name_chunks = test_name.split("_")
+    if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes:
+        if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes:
+            return "_".join(test_name_chunks[0:-2])
+        return "_".join(test_name_chunks[0:-1])
+    return test_name
+
+
+def check_if_enable(test: unittest.TestCase):
+    classname = str(test.__class__).split("'")[1].split(".")[-1]
+    sanitized_testname = remove_device_and_dtype_suffixes(test._testMethodName)
+
+    def matches_test(target: str):
+        target_test_parts = target.split()
+        if len(target_test_parts) < 2:
+            # poorly formed target test name
+            return False
+        target_testname = target_test_parts[0]
+        target_classname = target_test_parts[1][1:-1].split(".")[-1]
+        # if test method name or its sanitized version exactly matches the disabled
+        # test method name AND allow non-parametrized suite names to disable
+        # parametrized ones (TestSuite disables TestSuiteCPU)
+        return classname.startswith(target_classname) and (target_testname in (test._testMethodName, sanitized_testname))
+
+    if any(matches_test(x) for x in slow_tests_dict.keys()):
+        getattr(test, test._testMethodName).__dict__['slow_test'] = True
+        if not TEST_WITH_SLOW:
+            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
+
+    if not IS_SANDCASTLE:
+        should_skip = False
+        skip_msg = ""
+
+        for disabled_test, (issue_url, platforms) in disabled_tests_dict.items():
+            if matches_test(disabled_test):
+                platform_to_conditional: Dict = {
+                    "mac": IS_MACOS,
+                    "macos": IS_MACOS,
+                    "win": IS_WINDOWS,
+                    "windows": IS_WINDOWS,
+                    "linux": IS_LINUX,
+                    "rocm": TEST_WITH_ROCM,
+                    "xpu": TEST_XPU,
+                    "asan": TEST_WITH_ASAN,
+                    "dynamo": TEST_WITH_TORCHDYNAMO,
+                    "inductor": TEST_WITH_TORCHINDUCTOR,
+                    "slow": TEST_WITH_SLOW,
+                }
+
+                invalid_platforms = list(filter(lambda p: p not in platform_to_conditional, platforms))
+                if len(invalid_platforms) > 0:
+                    invalid_plats_str = ", ".join(invalid_platforms)
+                    valid_plats = ", ".join(platform_to_conditional.keys())
+
+                    print(f"Test {disabled_test} is disabled for some unrecognized ",
+                          f"platforms: [{invalid_plats_str}]. Please edit issue {issue_url} to fix the platforms ",
+                          'assigned to this flaky test, changing "Platforms: ..." to a comma separated ',
+                          f"subset of the following (or leave it blank to match all platforms): {valid_plats}")
+
+                    # Sanitize the platforms list so that we continue to disable the test for any valid platforms given
+                    platforms = list(filter(lambda p: p in platform_to_conditional, platforms))
+
+                if platforms == [] or any(platform_to_conditional[platform] for platform in platforms):
+                    should_skip = True
+                    skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \
+                        f" for {'all ' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \
" \ + "If you're seeing this on your local machine and would like to enable this test, " \ + "please make sure CI is not set and you are not using the flag --import-disabled-tests." + break + + if should_skip and not RERUN_DISABLED_TESTS: + # Skip the disabled test when not running under --rerun-disabled-tests verification mode + raise unittest.SkipTest(skip_msg) + + if not should_skip and RERUN_DISABLED_TESTS: + skip_msg = "Test is enabled but --rerun-disabled-tests verification mode is set, so only" \ + " disabled tests are run" + raise unittest.SkipTest(skip_msg) + + if TEST_SKIP_FAST: + if hasattr(test, test._testMethodName) and not getattr(test, test._testMethodName).__dict__.get('slow_test', False): + raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST") + + +# `TestCase.assertEqual` is very permissive and coerced the inputs into a format that could be compared. This is very +# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of +# `torch.testing._comparison.are_equal`, used for example by the public testing function +# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence +# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only +# change the supported inputs, but the comparison logic is the same. +# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation. + +class RelaxedBooleanPair(BooleanPair): + """Pair for boolean-like inputs. + + In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single + element tensor-like. + """ + _supported_number_types = NumberPair(0, 0)._supported_types + + def _process_inputs(self, actual, expected, *, id): + # We require only one of the inputs of the inputs to be a boolean and the other can also be a boolean, a + # number, or a single element tensor or array, whereas in default BooleanPair both inputs have to be booleans. + tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray) + other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types) + if not ( + (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) + or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) + ): + self._inputs_not_supported() + + return [self._to_bool(input, id=id) for input in (actual, expected)] + + def _to_bool(self, bool_like, *, id): + if isinstance(bool_like, np.number): + return bool(bool_like.item()) + elif type(bool_like) in self._supported_number_types: + return bool(bool_like) + elif isinstance(bool_like, (torch.Tensor, np.ndarray)): + numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size + if numel > 1: + self._fail( + ValueError, + f"Only single element tensor-likes can be compared against a boolean. " + f"Got {numel} elements instead.", + id=id + ) + + return bool(bool_like.item()) + else: + return super()._to_bool(bool_like, id=id) + + +class RelaxedNumberPair(NumberPair): + """Pair for number-like inputs. + + In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element + tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when + ``check_dtype=True`` is passed. 
+
+    In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also
+    supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and
+    ``@toleranceOverride`` decorators.
+    """
+    _TYPE_TO_DTYPE = {
+        int: torch.int64,
+        float: torch.float32,
+        complex: torch.complex64,
+    }
+
+    def __init__(
+        self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters
+    ) -> None:
+        super().__init__(actual, expected, check_dtype=False, **other_parameters)
+        self.rtol = max(self.rtol, rtol_override)
+        self.atol = max(self.atol, atol_override)
+
+    def _process_inputs(self, actual, expected, *, id):
+        # We require only one of the inputs to be a number and the other can also be a number or a single
+        # element tensor or array, whereas in default NumberPair both inputs have to be numbers.
+        tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray)
+        other_supported_types = (*self._supported_types, *tensor_or_array_types)
+        if not (
+            (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types))
+            or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types))
+        ):
+            self._inputs_not_supported()
+
+        return [self._to_number(input, id=id) for input in (actual, expected)]
+
+    def _to_number(self, number_like, *, id):
+        if isinstance(number_like, (torch.Tensor, np.ndarray)):
+            numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size
+            if numel > 1:
+                self._fail(
+                    ValueError,
+                    f"Only single element tensor-likes can be compared against a number. "
+                    f"Got {numel} elements instead.",
+                    id=id
+                )
+            number = number_like.item()
+            if isinstance(number, bool):
+                number = int(number)
+
+            return number
+        elif isinstance(number_like, Enum):
+            return int(number_like)  # type: ignore[call-overload]
+        else:
+            return super()._to_number(number_like, id=id)
+
+
+class TensorOrArrayPair(TensorLikePair):
+    """Pair for tensor-like inputs.
+
+    On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of
+    :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like that can be converted into a
+    tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard for their
+    relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine.
+
+    In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride``
+    and ``@toleranceOverride`` decorators.
+ """ + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _process_inputs(self, actual, expected, *, id, allow_subclasses): + self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray)) + + actual, expected = (self._to_tensor(input) for input in (actual, expected)) + for tensor in (actual, expected): + self._check_supported(tensor, id=id) + return actual, expected + + +class TypedStoragePair(TensorLikePair): + """Pair for :class:`torch.storage.TypedStorage` inputs.""" + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage) + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _to_tensor(self, typed_storage): + return torch.tensor( + typed_storage._untyped_storage, + dtype={ + torch.quint8: torch.uint8, + torch.quint4x2: torch.uint8, + torch.quint2x4: torch.uint8, + torch.qint32: torch.int32, + torch.qint8: torch.int8 + }.get(typed_storage.dtype, typed_storage.dtype), + device=typed_storage.device, + ) + + +class UnittestPair(Pair): + """Fallback ABC pair that handles non-numeric inputs. + + To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in + order to use it with the :class:`Pair` "framework" from :func:`are_equal`. + + Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support. + """ + CLS: Union[Type, Tuple[Type, ...]] + TYPE_NAME: Optional[str] = None + + def __init__(self, actual, expected, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=self.CLS) + super().__init__(actual, expected, **other_parameters) + + def compare(self): + test_case = unittest.TestCase() + + try: + return test_case.assertEqual(self.actual, self.expected) + except test_case.failureException as error: + msg = str(error) + + type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__ + self._fail(AssertionError, f"{type_name.title()} comparison failed: {msg}") + + +class StringPair(UnittestPair): + CLS = (str, bytes) + TYPE_NAME = "string" + + +class SetPair(UnittestPair): + CLS = set + + +class TypePair(UnittestPair): + CLS = type + + +class ObjectPair(UnittestPair): + CLS = object + + +# This implements a variant of assertRaises/assertRaisesRegex where we first test +# if the exception is NotImplementedError, and if so just skip the test instead +# of failing it. +# +# This is implemented by inheriting from the (private) implementation of +# assertRaises from unittest.case, and slightly tweaking it for this new +# behavior. The year is 2021: this private class hierarchy hasn't changed since +# 2010, seems low risk to inherit from. 
+class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext): + def __exit__(self, exc_type, exc_value, tb): + if exc_type is not None and issubclass(exc_type, NotImplementedError): + self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined] + return super().__exit__(exc_type, exc_value, tb) + + +@contextmanager +def set_warn_always_context(new_val: bool): + old_val = torch.is_warn_always_enabled() + torch.set_warn_always(new_val) + try: + yield + finally: + torch.set_warn_always(old_val) + + +class NoTest: + # causes pytest to not recognize this class as a test + __test__ = False + + +class TestCase(expecttest.TestCase): + # NOTE: "precision" lets classes and generated tests set minimum + # atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for + # example. + # NOTE: "rel_tol" lets classes and generated tests set minimum + # rtol values when comparing tensors. Used by @toleranceOverride, for example. + _precision: float = 0 + _rel_tol: float = 0 + + # Toggles whether to assert that `torch.get_default_dtype()` returns + # `torch.float` when `setUp` and `tearDown` are called. + _default_dtype_check_enabled: bool = False + + # Always use difflib to print diffs on multi line equality. + # Undocumented feature in unittest + _diffThreshold = sys.maxsize + maxDiff = None + + # checker to early terminate test suite if unrecoverable failure occurs. + def _should_stop_test_suite(self): + if torch.cuda.is_initialized(): + # CUDA device side error will cause subsequence test cases to fail. + # stop entire test suite if catches RuntimeError during torch.cuda.synchronize(). + try: + torch.cuda.synchronize() + except RuntimeError as rte: + print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr) + print(str(rte), file=sys.stderr) + return True + return False + else: + return False + + @property + def precision(self) -> float: + return self._precision + + @precision.setter + def precision(self, prec: float) -> None: + self._precision = prec + + @property + def rel_tol(self) -> float: + return self._rel_tol + + @rel_tol.setter + def rel_tol(self, prec: float) -> None: + self._rel_tol = prec + + _do_cuda_memory_leak_check = False + _do_cuda_non_default_stream = False + + # When True, if a test case raises a NotImplementedError, instead of failing + # the test, skip it instead. + _ignore_not_implemented_error = False + + def __init__(self, method_name='runTest', methodName='runTest'): + # methodName is the correct naming in unittest and testslide uses keyword arguments. + # So we need to use both to 1) not break BC and, 2) support testslide. + if methodName != "runTest": + method_name = methodName + super().__init__(method_name) + + test_method = getattr(self, method_name, None) + if test_method is not None: + # Wraps the tested method if we should do CUDA memory check. + if TEST_CUDA_MEM_LEAK_CHECK: + self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True) + # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044 + if self._do_cuda_memory_leak_check and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors) + + # Wraps the tested method if we should enforce non default CUDA stream. 
+ self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True) + if self._do_cuda_non_default_stream and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream) + + if self._ignore_not_implemented_error: + self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError)) + + if PRINT_REPRO_ON_FAILURE: + try: + def _get_rel_test_path(abs_test_path): + # Attempt to get relative path based on the "test" dir. + # In CI, the working dir is not guaranteed to be the base repo dir so + # we can't just compute relative path from that. + parts = Path(abs_test_path).parts + for i, part in enumerate(parts): + if part == "test": + base_dir = os.path.join(*parts[:i]) if i > 0 else '' + return os.path.relpath(abs_test_path, start=base_dir) + + # Can't determine containing dir; just return the test filename. + # The path isn't strictly correct but it's arguably better than nothing. + return os.path.split(abs_test_path)[1] + + # NB: In Python 3.8, the getfile() call will return a path relative + # to the working directory, so convert that to absolute. + abs_test_path = os.path.abspath(inspect.getfile(type(self))) + test_filename = _get_rel_test_path(abs_test_path) + class_name = type(self).__name__ + test_run_cmd = f"python {test_filename} {class_name}.{method_name}" + env_var_prefix = TestEnvironment.repro_env_var_prefix() + repro_parts = [env_var_prefix, test_run_cmd] + self.wrap_with_policy( + method_name, + lambda repro_parts=repro_parts: print_repro_on_failure(repro_parts)) + except Exception as e: + # Don't fail entirely if we can't get the test filename + log.info("could not print repro string", extra=str(e)) + + def assertLeaksNoCudaTensors(self, name=None): + name = self.id() if name is None else name + return CudaMemoryLeakCheck(self, name) + + def enforceNonDefaultStream(self): + return CudaNonDefaultStream() + + def _remove_ansi_escape(self, input): + # 7-bit C1 ANSI sequences + ansi_escape = re.compile(r''' + \x1B # ESC + (?: # 7-bit C1 Fe (except CSI) + [@-Z\\-_] + | # or [ for CSI, followed by a control sequence + \[ + [0-?]* # Parameter bytes + [ -/]* # Intermediate bytes + [@-~] # Final byte + ) + ''', re.VERBOSE) + return ansi_escape.sub('', input) + + def remove_comment_lines(self, input_string): + lines = input_string.split('\n') + filtered_lines = [line for line in lines if not line.strip().startswith('#')] + return '\n'.join(filtered_lines) + + def remove_empty_lines(self, input_string): + lines = input_string.split('\n') + filtered_lines = [line for line in lines if not line.strip() == ''] + return '\n'.join(filtered_lines) + + # ignore comments will ignore lines that starts with # after being stripped + def assertExpectedInline(self, actual, expect, skip=0, ignore_comments=False, ignore_empty_lines=False): + actual = actual if isinstance(actual, str) else str(actual) + actual = self._remove_ansi_escape(actual) + expect = self._remove_ansi_escape(expect) + if ignore_comments: + actual = self.remove_comment_lines(actual) + expect = self.remove_comment_lines(expect) + + if ignore_empty_lines: + actual = self.remove_empty_lines(actual) + expect = self.remove_empty_lines(expect) + + return super().assertExpectedInline(actual if isinstance(actual, str) else str(actual), expect, skip + 1) + + # Munges exceptions that internally contain stack traces, using munge_exc + def assertExpectedInlineMunged( + self, exc_type, callable, expect, *, suppress_suffix=True + ): + try: + callable() + except exc_type as e: + 
self.assertExpectedInline( + munge_exc(e, suppress_suffix=suppress_suffix, skip=1), expect, skip=1 + ) + return + self.fail(msg="Did not raise when expected to") + + def assertLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertLogs(logger, level) + + def assertNoLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertNoLogs(logger, level) + + def wrap_with_cuda_policy(self, method_name, policy): + test_method = getattr(self, method_name) + # the import below may initialize CUDA context, so we do it only if + # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream + # is True. + # TODO: sure looks like we unconditionally initialize the context here + # -- ezyang + from torch.testing._internal.common_cuda import TEST_CUDA + fullname = self.id().lower() # class_name.method_name + if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname): + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + def wrap_with_policy(self, method_name, policy): + test_method = getattr(self, method_name) + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + # A policy is a zero-argument function that returns a context manager. + # We don't take the context manager directly as it may be necessary to + # construct it once per test method + def wrap_method_with_policy(self, method, policy): + # Assumes that `method` is the tested function in `self`. + # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope + # alive, so this cannot be done in setUp and tearDown because + # tearDown is run unconditionally no matter whether the test + # passes or not. For the same reason, we can't wrap the `method` + # call in try-finally and always do the check. + @wraps(method) + def wrapper(self, *args, **kwargs): + with policy(): + method(*args, **kwargs) + return types.MethodType(wrapper, self) + + def wrap_with_cuda_memory_check(self, method): + return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors) + + def _run_custom(self, result=None): + using_unittest = isinstance(result, unittest.TestResult) + + super_run = super().run + test_cls = super_run.__self__ + + # Are we compiling? + compiled = TEST_WITH_TORCHDYNAMO or TEST_WITH_AOT_EAGER or TEST_WITH_TORCHINDUCTOR + # Is the class strict and compiling? 
+ strict_default = False + should_reset_dynamo = False + if compiled: + try: + path = inspect.getfile(type(test_cls)) + full_path = os.path.abspath(path) + match = re.match(r".*/test/(.*).py", full_path) + if match is not None: + filename = match.group(1) + if TEST_WITH_TORCHINDUCTOR: + from .dynamo_test_failures import FIXME_inductor_non_strict + strict_default = filename not in FIXME_inductor_non_strict + + from .dynamo_test_failures import FIXME_inductor_dont_reset_dynamo + should_reset_dynamo = filename not in FIXME_inductor_dont_reset_dynamo + else: + strict_default = True + # inspect.getfile can fail with these + except (OSError, TypeError): + pass + if "STRICT_DEFAULT" in os.environ: + if os.environ["STRICT_DEFAULT"] == "1": + strict_default = True + + strict_mode = False + if compiled: + test_method = getattr(self, self._testMethodName) + if hasattr(test_method, "dynamo_strict"): + strict_mode = test_method.dynamo_strict + elif hasattr(test_cls, "dynamo_strict"): + strict_mode = test_cls.dynamo_strict + else: + strict_mode = strict_default + nopython = getattr(test_cls, "dynamo_strict_nopython", False) and compiled + + if strict_mode or should_reset_dynamo: + torch._dynamo.reset() + + # TODO: Remove this; this is grandfathered in because we suppressed errors + # on test suite previously + # When strict mode is False, suppress_errors is True + if compiled: + suppress_errors = not strict_mode + else: + suppress_errors = torch._dynamo.config.suppress_errors + with unittest.mock.patch("torch._dynamo.config.suppress_errors", suppress_errors): + if TEST_WITH_TORCHINDUCTOR: + super_run = torch._dynamo.optimize("inductor")(super_run) + elif TEST_WITH_AOT_EAGER: + super_run = torch._dynamo.optimize("aot_eager_decomp_partition")(super_run) + elif TEST_WITH_TORCHDYNAMO: + # TorchDynamo optimize annotation + # Assume eager-generated GraphModules will not error out. + # If we do, this is probably a Dynamo bug! + super_run = torch._dynamo.optimize("eager_noexcept", nopython=nopython)(super_run) + key = f"{self.__class__.__name__}.{self._testMethodName}" + from .dynamo_test_failures import dynamo_expected_failures, dynamo_skips + + def expect_failure(f, test_name): + @wraps(f) + def wrapper(*args, **kwargs): + try: + f(*args, **kwargs) + except BaseException as e: + self.skipTest(e) + raise RuntimeError(f"Unexpected success, please remove `test/dynamo_expected_failures/{test_name}`") + return wrapper + + if key in dynamo_expected_failures: + method = getattr(self, self._testMethodName) + setattr(self, self._testMethodName, expect_failure(method, key)) + + def ignore_failure(f, test_name): + @wraps(f) + def wrapper(*args, **kwargs): + try: + f(*args, **kwargs) + except BaseException as e: + self.skipTest(e) + method = getattr(self, self._testMethodName) + if getattr(method, "__unittest_expecting_failure__", False): + self.skipTest("unexpected success") + else: + self.skipTest(f"This test passed, maybe we can remove `test/dynamo_skips/{test_name}`") + return wrapper + + if key in dynamo_skips: + method = getattr(self, self._testMethodName) + setattr(self, self._testMethodName, ignore_failure(method, key)) + + super_run(result=result) + + if strict_mode or should_reset_dynamo: + torch._dynamo.reset() + + # Early terminate test if necessary. 
If using pytest, use the -x flag instead + if using_unittest and self._should_stop_test_suite(): + if result.wasSuccessful(): + case = TestCase() + if TEST_SAVE_XML is not None: + # This is a big hacky, XMLRunner modifies expected type from TestCase to TestInfo + # Create dummy TestInfo to record results correctly + from xmlrunner.result import _TestInfo # type: ignore[import] + case = _TestInfo(result, case) + case.output = _TestInfo.ERROR + case.elapsed_time = 0.0 + case.test_description = "TestSuiteEarlyFailure" + # This shouldn't really happen, but if does add fake failure + # For more details see https://github.com/pytorch/pytorch/issues/71973 + result.failures.append((case, "TestSuite execution was aborted early")) + assert result.wasSuccessful() is False + result.stop() + + + def run(self, result=None): + with contextlib.ExitStack() as stack: + if TEST_WITH_CROSSREF: + stack.enter_context(CrossRefMode()) + self._run_custom( + result=result, + ) + + def setUp(self): + check_if_enable(self) + set_rng_seed(SEED) + + # Save global check sparse tensor invariants state that can be + # restored from tearDown: + self._check_invariants = torch.sparse.check_sparse_tensor_invariants.is_enabled() + + # Enable invariant checks for all sparse tensors constructions + # including the unsafe ones. If this is not desired for some + # test case, use check_invariants=False optional argument to + # sparse tensor constructors or + # @torch.sparse.check_sparse_tensor_invariants(False) + # decorator to disable the invariant checks. + torch.sparse.check_sparse_tensor_invariants.enable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + # attempt to reset some global state at the end of the test + self._prev_grad_state = torch.is_grad_enabled() + + def tearDown(self): + # There exists test cases that override TestCase.setUp + # definition, so we cannot assume that _check_invariants + # attribute is defined in general. + if hasattr(self, '_check_invariants'): + # Restore the global check sparse tensor invariants state + if self._check_invariants: + torch.sparse.check_sparse_tensor_invariants.enable() + else: + torch.sparse.check_sparse_tensor_invariants.disable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + # attribute may not be defined, per above + if hasattr(self, '_prev_grad_state'): + torch.set_grad_enabled(self._prev_grad_state) + + @staticmethod + def _make_crow_indices(n_rows, n_cols, nnz, + *, device, dtype, random=True): + """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and + the number of specified elements nnz. + + If random is True, the column counts of rows are in random + order. Otherwise, the column counts of rows are defined by the + used sampling method. + + Sampling method + --------------- + + The used sampling method was introduced in + https://pearu.github.io/csr_sampling.html, and here we give + only an overall description of the method. + + Notice that crow_indices can be defined as cumsum(counts) + where counts is a sequence of non-negative integers satisfying + the following conditions: + + len(counts) == n_rows + 1 + counts.max() <= n_cols + + while counts[i + 1] is interpreted as the number of specified + elements in the i-th row. + + The used sampling method aims at increasing the diversity of + CSR samples, that is, a CSR sample should contain (i) rows + that are all filled, (ii) rows with no elements at all, and + (iii) rows that are partially filled. 
At the same time and for + the given total number of specified elements (nnz), there + should be minimal preference to rows with a given number of + elements. To achieve this, the sampling method is built-up on + using a sawteeth model for counts. In the simplest case, we + would have + + counts = arange(n_rows + 1) % (n_cols + 1) + + that has equal number of all possible column counts per row. + This formula can be used only for specific input values of + n_rows, n_cols, and nnz. To generalize this model to any + combinations of inputs, the counts model above is extended + with an incomplete sawtooth, and the right and lower + rectangular parts that will guarantee that + + counts.sum() == nnz + + for any combination of n_rows, n_cols, and nnz. Basically, + we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid + that is able to hold a sequence of sawteeth and so-called + final correction, while the external part of the window is + filled with counts to meet the nnz constraint exactly. + """ + assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols) + + def sawteeth(n, m): + # return the total number of counts in the sequence of + # sawteeth where n and m define a window in (n_rows+1, + # n_cols+1) rectangle where the sequence of sawteeth + # perfectly fit. + M = (n_cols - m) * (n_cols - m + 1) // 2 + K = (n_rows - n) % (n_cols - m + 1) + return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2 + + # Different from the original method description, here counts + # has leading 0 required by crow_indices: + counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu')) + + n = m = 0 + N = sawteeth(n, m) + if N and nnz >= max(N, n_cols): + # determine the width of the sawteeth window. We use bisection to solve + # N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols) + # for n + n_left = n + n_right = n_rows - 1 + N_right = sawteeth(n_right, m) + while n_right - n_left > 1: + n_middle = (n_left + n_right) // 2 + N_middle = sawteeth(n_middle, m) + if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols): + n_right, N_right = n_middle, N_middle + else: + n_left = n_middle + n, N = n_right, N_right + # fill the right rectangle with counts: + assert n + counts[-n:].fill_(n_cols) + + if N and nnz - n * n_cols >= max(N, n_rows - n): + # determine the height of the sawteeth window. We use bisection to solve + # N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n) + # for m. 
+ m_left = m + m_right = n_cols - 1 + N_right = sawteeth(n, m_right) + while m_right - m_left > 1: + m_middle = (m_left + m_right) // 2 + N_middle = sawteeth(n, m_middle) + if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n): + m_right, N_right = m_middle, N_middle + else: + m_left = m_middle + m, N = m_right, N_right + # fill the bottom rectangle with counts: + assert m + counts[1:n_rows - n + 1].fill_(m) + + if N: + # fill the sawteeth window with counts + q, r = divmod(nnz - n * n_cols - m * (n_rows - n), + (n_cols - m) * (n_cols - m + 1) // 2) + p = 1 + q * (n_cols - m + 1) + k = math.isqrt(2 * r) + if k * (k + 1) > 2 * r: + k -= 1 + corr = r - k * (k + 1) // 2 + assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle + # sequence of full sawteeth: + counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1) + # incomplete sawtooth: + counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device) + else: + # given input does not support sawteeth + p = 1 + corr = nnz - n * n_cols - m * (n_rows - n) + + # correction that will guarantee counts.sum() == nnz: + counts[p] += corr + + if random: + # randomize crow_indices by shuffling the sawteeth + # sequence: + perm = torch.randperm(n_rows, device=counts.device) + counts[1:] = counts[1:][perm] + + # compute crow_indices: + crow_indices = counts + crow_indices.cumsum_(dim=0) + return crow_indices.to(device=device) + + def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0): + from operator import mul + from functools import reduce + sparse_dim = 2 + assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments' + assert len(size) >= sparse_dim + if blocksize: + assert len(blocksize) == 2, (size, blocksize) + assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize) + assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize) + blocksize0, blocksize1 = blocksize + else: + blocksize0 = blocksize1 = 1 + + size = tuple(size) + dense_size = size[(len(size) - dense_dims):] + + def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz): + compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype) + plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device) + for i in range(n_compressed_dims): + count = compressed_indices[i + 1] - compressed_indices[i] + plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort( + torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count]) + low = -1 if dtype != torch.uint8 else 0 + high = 1 if dtype != torch.uint8 else 2 + values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high) + return values, compressed_indices, plain_indices + + batch_shape = size[:-2 - dense_dims] + n_batch = reduce(mul, batch_shape, 1) + + if layout in {torch.sparse_csr, torch.sparse_bsr}: + n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1 + else: + n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0 + blocknnz = nnz // (blocksize0 * blocksize1) + sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)] + sparse_tensors_it = map(list, zip(*sparse_tensors)) + + values = 
torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size) + compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + return torch.sparse_compressed_tensor(compressed_indices, plain_indices, + values, size=size, dtype=dtype, layout=layout, device=device) + + def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims) + + def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0) + + def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype): + # Assert not given impossible combination, where the sparse dims have + # empty numel, but nnz > 0 makes the indices containing values. + assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments' + + v_size = [nnz] + list(size[sparse_dim:]) + v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1) + i = torch.rand(sparse_dim, nnz, device=device) + i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i)) + i = i.to(torch.long) + if is_uncoalesced: + i1 = i[:, :(nnz // 2), ...] + i2 = i[:, :((nnz + 1) // 2), ...] + i = torch.cat([i1, i2], 1) + x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device) + + if not is_uncoalesced: + x = x.coalesce() + else: + # FIXME: `x` is a sparse view of `v`. Currently rebase_history for + # sparse views is not implemented, so this workaround is + # needed for inplace operations done on `x`, e.g., copy_(). + # Remove after implementing something equivalent to CopySlice + # for sparse views. + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards + x = x.detach().clone()._coalesced_(False) + return x, x._indices().clone(), x._values().clone() + + def generate_simple_inputs(self, layout, + device=None, + dtype=None, + index_dtype=None, + pin_memory=None, + members_pin_memory=None, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=True, + enable_non_contiguous_values=True, + enable_batch_variable_nse=False, + output_tensor=True, + patterns=None): + """Generator of simple inputs for tensor constructors of the given layout. + + The generated tensor inputs have the following properties: + + - tensor shapes are minimal but not trivial + - tensor values are sorted sequences for COO and CSR formats, e.g. 
[1, 2, 3, 4] + - the generated tensors represent the same mathematical tensor for all layouts + - the generated tensors include regular, zero-sized, and optionally, batched or/and hybrid tensors. + - the generated tensors include contiguous or non-contiguous tensors both in indices and values + + If output_tensor is True, yield tensors with the given + layout. Otherwise, yield inputs to the corresponding tensor + constructors: + + - sparse compressed input is defined as + (compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype, + pin_memory=pin_memory) + + - sparse COO input is defined as + (indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype, pin_memory=pin_memory) + + - strided input is defined as + (values,), dict(device=device, dtype=dtype) + """ + if index_dtype is None: + index_dtype = torch.int64 + + is_compressed_sparse_layout = layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + + if output_tensor: + for args, kwargs in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype, + pin_memory=pin_memory, + enable_batch=enable_batch, enable_hybrid=enable_hybrid, + enable_zero_sized=enable_zero_sized, + enable_non_contiguous_indices=enable_non_contiguous_indices, + enable_non_contiguous_values=enable_non_contiguous_values, + enable_batch_variable_nse=enable_batch_variable_nse, + output_tensor=False): + if members_pin_memory: + args = tuple(a.pin_memory() for a in args) + if layout is torch.strided: + assert len(args) == 1 + size = kwargs.pop('size', None) # to ensure that a zero-sized tensor has the desired shape + assert size is not None + if pin_memory: + yield args[0].reshape(size).pin_memory() + else: + yield args[0].reshape(size) + elif layout is torch.sparse_coo: + yield torch.sparse_coo_tensor(*args, **kwargs) + elif is_compressed_sparse_layout: + kwargs.update(layout=layout) + yield torch.sparse_compressed_tensor(*args, **kwargs) + else: + assert 0 # unreachable + return + + def get_blockpattern(pattern, blocksize): + basesize = pattern.shape + assert basesize[0] % blocksize[0] == 0, (basesize, blocksize) + assert basesize[1] % blocksize[1] == 0, (basesize, blocksize) + blockpattern = pattern.reshape(-1, + blocksize[0], + basesize[1] // blocksize[1], + blocksize[1]).transpose(-3, -2).any(-1).any(-1) + block_ids = torch.arange(1, blockpattern.numel() + 1).reshape(blockpattern.shape) + return (blockpattern != 0) * block_ids + + def get_sparse_data(pattern): + basesize = pattern.shape + assert len(basesize) == 2, basesize # pattern is expected to be a matrix + + # We cannot use `torch.sparse_xyz_tensor(pattern)` to + # compute the sparse layout indices and values because + # generate_simple_inputs is used to generate the inputs to + # test `torch.sparse_xyz_tensor` factory functions, so + # we'll compute the indices and values independently of + # the factory functions. 
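+            # Illustrative sketch of the mapping computed below (not executed):
+            # for the 2 x 2 pattern [[0, 2], [3, 4]] the returned data would be
+            #   COO: indices [[0, 1, 1], [1, 0, 1]], values [1, 2, 3]
+            #   CSR: crow_indices [0, 1, 3], col_indices [1, 0, 1], values [1, 2, 3]
+            #   CSC: ccol_indices [0, 1, 3], row_indices [1, 0, 1], values [2, 1, 3]
+            #   strided: [[0, 1], [2, 3]]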
+ + indices = torch.where(pattern != 0) + coo_indices = torch.stack(indices) + crow_indices = torch.zeros(basesize[0] + 1, dtype=torch.int64) + crow_indices[1:] = torch.cumsum(coo_indices[0].bincount(minlength=basesize[0]), 0) + col_indices = coo_indices[1] + strided_values = torch.zeros(basesize, dtype=torch.int64) + + # the property of `values == range(1, 1+nnz)` is used in + # get_sparse_data_with_block to relate BSR and BSC values, + # so, don't change the following line: + values = torch.arange(1, 1 + len(indices[0]), dtype=torch.int64) + strided_values[indices] = values + + indices_T = torch.where(pattern.transpose(0, 1) != 0) + coo_indices_T = torch.stack(indices_T) + ccol_indices = torch.zeros(basesize[1] + 1, dtype=torch.int64) + ccol_indices[1:] = torch.cumsum(coo_indices_T[0].bincount(minlength=basesize[1]), 0) + row_indices = coo_indices_T[1] + csc_values = strided_values.transpose(0, 1)[indices_T] + + return {torch.sparse_coo: (coo_indices, values), + torch.sparse_csr: (crow_indices, col_indices, values), + torch.sparse_csc: (ccol_indices, row_indices, csc_values), + torch.strided: (strided_values,)} + + def get_sparse_data_with_block(pattern, blocksize): + nonblock_data = get_sparse_data(pattern) + blockpattern = get_blockpattern(pattern, blocksize) + block_data = get_sparse_data(blockpattern) + + strided_values = nonblock_data[torch.strided][0] + block_indices = block_data[torch.sparse_coo][0] + bsr_values = torch.stack([strided_values[bi * blocksize[0]:(bi + 1) * blocksize[0], + bj * blocksize[1]:(bj + 1) * blocksize[1]] + for bi, bj in block_indices.transpose(0, 1)]) + + # here we use the property `values == range(1, 1+nnz)` and + # `values` relation to `csc_values` (see get_sparse_data) + # to get BSC blocks via reordering the BSR blocks: + bsc_values = bsr_values[block_data[torch.sparse_csc][2] - 1] + + return {torch.sparse_bsr: (*block_data[torch.sparse_csr][:2], bsr_values), + torch.sparse_bsc: (*block_data[torch.sparse_csc][:2], bsc_values), + **nonblock_data} + + def get_batch_sparse_data(pattern, blocksize): + size = pattern.shape + if len(size) <= 2: # non-batch + return get_sparse_data_with_block(pattern, blocksize) + + # batch data is created recursively: + batch_data = {} + for i, item in enumerate(pattern): + for layout, d in get_batch_sparse_data(item, blocksize).items(): + target = batch_data.get(layout) + if layout is torch.sparse_coo: + # a "batch COO" means a COO with the leading + # sparse dimensions interpreted as batch + # dimensions + ext_coo_indices1 = torch.cat((torch.full((1, len(d[1])), i, dtype=torch.int64), d[0])) + if target is None: + target = batch_data[layout] = (ext_coo_indices1, d[1]) + else: + target[0].set_(torch.cat((target[0], ext_coo_indices1), 1)) + target[1].set_(torch.cat((target[1], d[1]))) + else: + if target is None: + target = batch_data[layout] = tuple(d[j].unsqueeze(0) for j in range(len(d))) + else: + for j in range(len(d)): + target[j].set_(torch.cat((target[j], d[j].unsqueeze(0)))) + return batch_data + + def generate_values(base, densesize): + """Generates a tensor of shape densesize with values equal to + + base + i_1 * 10^0 + ... + i_d * 10^{d - 1} + + at indices i_1, ..., i_d (with 0 <= i_j < densesize[j] for any 1 <= j <= + len(densesize)) + + This mapping produces unique values as long as + densesize[i] < 10 for all i in range(len(densesize)). 
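+
+            For example (illustrative), generate_values(3, (2, 3)) produces
+
+              [[ 3, 13, 23],
+               [ 4, 14, 24]]
+
+            since the first dense index contributes 10^0 and the second 10^1.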
+ """ + + if not densesize: + return base + if not isinstance(base, int) and base.ndim > 0: + return torch.stack([generate_values(b, densesize) for b in base]) + if base == 0: + return torch.zeros(densesize, dtype=torch.int64) + r = torch.arange(densesize[0], dtype=torch.int64) + for i, d in enumerate(densesize[1:]): + y = torch.arange(d, dtype=torch.int64) * (10 ** (i + 1)) + r = r[..., None] + y[None, ...] + r.add_(base) + return r + + if patterns is None: + # A pattern is a 3-tuple with the following items: + # + # - a list of integers with the depth of two or more. The + # integers define the sparsity patterns of the generated + # inputs: zero values correspond to unspecified + # elements/blocks, and non-zero values to the specified + # elements. + # + # For debugging convenience, the elements with the same + # value typically belong to the same block. However, it + # is not a hard requirement: as long as the shape of a + # pattern divides with block sizes, the pattern will be + # a valid one. + # + # If the depth of the list is larger than two, inputs + # with batch dimensions will be generated. + # + # - a list of 2-tuples of block sizes, used to generate + # BSR/BSC tensors with various block size parameters + # + # - a list of tuples of dense dimensions, used to generate + # hybrid tensors with various dense dimensions + # + patterns = [ + # a simple 3 x 2 tensor: non-hybrid, hybrid with 1 and 2 dense dimensions + ([[1, 2, 0], + [1, 0, 3]], [(2, 1), (1, 3)], [(), (2,), (4, 5)]), + # 2 x 3 batch of 3 x 2 tensors: non-hybrid and hybrid with 2 dense dimensions + ([[[[1, 2, 0], + [1, 0, 3]], + [[1, 2, 3], + [1, 0, 0]], + [[1, 0, 0], + [1, 2, 3]]], + [[[0, 2, 0], + [1, 2, 3]], + [[1, 0, 3], + [1, 2, 0]], + [[1, 2, 3], + [0, 2, 0]]]], [(2, 1), (2, 3)], [(), (2,)]), + # tensor with non-trivial blocksize + ([[0, 1, 0, 2, 0, 2], + [0, 1, 0, 0, 2, 0], + [3, 3, 3, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 5, 0, 6, 6, 6], + [5, 0, 5, 6, 6, 6], + [0, 0, 0, 0, 8, 8], + [7, 7, 7, 0, 8, 8]], [(2, 3)], [(), (4, 5)]), + # batch tensor with variable NSE + # Requires https://github.com/pytorch/pytorch/pull/84843 or similar. 
+ ([[[1, 2], + [3, 4]], + [[1, 0], + [0, 0]]], [(1, 1)], ([()] if enable_batch_variable_nse else []))] + + def non_contiguous_copy(t, dim=-1, offset=0): + # return a copy of t that is non-contiguous along the + # given dimension and with the given storage offset + self.assertTrue(t.is_contiguous()) + if dim < 0: + dim = dim + t.ndim + assert dim >= 0 and dim < t.ndim + step = max(2, offset + 1) + tmp = torch.zeros((*t.shape[:dim], t.shape[dim] * step, *t.shape[dim + 1:]), dtype=t.dtype, device=t.device) + dim_slices = (*((slice(None),) * dim), slice(offset, None, step)) + r = tmp[dim_slices].copy_(t) + self.assertFalse(r.is_contiguous()) + self.assertEqual(t, r) + return r + + # the main loop of the method: + for pattern, blocksizes, densesizes in patterns: + if not enable_hybrid: + densesizes = [s for s in densesizes if not s] + if not (densesizes and blocksizes): + continue + pattern = torch.tensor(pattern, dtype=torch.int64) + if not enable_batch and pattern.ndim > 2: + continue + for blocksize in blocksizes: + data = get_batch_sparse_data(pattern, blocksize)[layout] + for densesize in densesizes: + indices = [a.to(device=device, dtype=index_dtype) for a in data[:-1]] + values = generate_values(data[-1], densesize).to(device=device, dtype=dtype) + kwargs = dict(device=device, dtype=dtype, size=pattern.shape + densesize) + if pin_memory is not None: + kwargs.update(pin_memory=pin_memory) + + yield (*indices, values), kwargs.copy() + if enable_non_contiguous_indices and pattern.ndim > 2: + # sparse compressed indices can be sliced only along batch dimensions + for (dim, offset) in {(0, 1), (-2, 0)}: + indices_copy = [non_contiguous_copy(a, dim=dim, offset=offset) for a in indices] + yield (*indices_copy, values), kwargs.copy() + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices_copy, values_copy), kwargs.copy() + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices, values_copy), kwargs.copy() + + # zero-sized tensor inputs, non-batch, non-hybrid/hybrid + if enable_zero_sized: + for basesize, blocksizes, densesizes in [ + ((2, 0), [(1, 2)], [(), (2,), (2, 3)] if enable_hybrid else [()]), + ((0, 2), [(1, 2), (2, 1), (3, 2)], [()]), + ((0, 0), [(1, 2)], [()]), + ]: + for blocksize in blocksizes: + for densesize in densesizes: + if layout == torch.strided: + indices = () + values = torch.empty((basesize + densesize), device=device, dtype=dtype) + elif layout == torch.sparse_coo: + indices = (torch.empty(len(basesize), 0, device=device, dtype=index_dtype),) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csr: + crow_indices = torch.tensor([0] * (basesize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (crow_indices, col_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csc: + ccol_indices = torch.tensor([0] * (basesize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsr: + crow_indices = torch.tensor([0] * (basesize[0] // blocksize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (crow_indices, col_indices) + values = 
torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsc: + ccol_indices = torch.tensor([0] * (basesize[1] // blocksize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + else: + assert 0 # unreachable + kwargs = dict(device=device, dtype=dtype, size=basesize + densesize) + if pin_memory is not None: + kwargs.update(pin_memory=pin_memory) + yield (*indices, values), kwargs + + def safeToDense(self, t): + # coalesce is only implemented for COO + if t.layout == torch.sparse_coo: + t = t.coalesce() + return t.to_dense() + + # Compares a torch function with a reference function for a given sample input (object of SampleInput) + # Note: only values are compared, type comparison is not done here + def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs): + numpy_sample = sample_input.numpy() + n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs + t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs + + actual = torch_fn(t_inp, *t_args, **t_kwargs) + expected = ref_fn(n_inp, *n_args, **n_kwargs) + + self.assertEqual(actual, expected, exact_device=False, **kwargs) + + # Compares the given Torch and NumPy functions on the given tensor-like object. + # NOTE: both torch_fn and np_fn should be functions that take a single + # tensor (array). If the torch and/or NumPy function require additional + # arguments then wrap the function in a lambda or pass a partial function. + # TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol) + def compare_with_numpy(self, torch_fn, np_fn, tensor_like, + device=None, dtype=None, **kwargs): + assert TEST_NUMPY + + if isinstance(tensor_like, torch.Tensor): + assert device is None + assert dtype is None + t_cpu = tensor_like.detach().cpu() + if t_cpu.dtype is torch.bfloat16: + t_cpu = t_cpu.float() + a = t_cpu.numpy() + t = tensor_like + else: + d = copy.copy(torch_to_numpy_dtype_dict) + d[torch.bfloat16] = np.float32 + a = np.array(tensor_like, dtype=d[dtype]) + t = torch.tensor(tensor_like, device=device, dtype=dtype) + + np_result = np_fn(a) + torch_result = torch_fn(t).cpu() + + # Converts arrays to tensors + if isinstance(np_result, np.ndarray): + try: + np_result = torch.from_numpy(np_result) + except Exception: + # NOTE: copying an array before conversion is necessary when, + # for example, the array has negative strides. + np_result = torch.from_numpy(np_result.copy()) + if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float: + torch_result = torch_result.to(torch.float) + + self.assertEqual(np_result, torch_result, **kwargs) + + def assertEqualIgnoreType(self, *args, **kwargs) -> None: + # If you are seeing this function used, that means test is written wrongly + # and deserves detailed investigation + return self.assertEqual(*args, exact_dtype=False, **kwargs) + + def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None: + r"""Tests if tensor x equals to y, if y to be broadcast to x.shape. + """ + if not isinstance(y, Iterable): + # int, float, etc. 
or different shape tensors + y = torch.ones_like(x) * y + if not isinstance(y, torch.Tensor): + # iterable, but not a tensor + y = torch.ones_like(x) * torch.tensor(y) + return self.assertEqual(x, y, *args, **kwargs) + + def assertEqual( + self, + x, + y, + msg: Optional[Union[str, Callable[[str], str]]] = None, + *, + atol: Optional[float] = None, + rtol: Optional[float] = None, + equal_nan=True, + exact_dtype=True, + # TODO: default this to True + exact_device=False, + exact_layout=False, + exact_stride=False, + exact_is_coalesced=False + ): + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + # numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall + # back to an elementwise comparison. Note that this has to happen here and not for example in + # `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform + # multiple comparisons. + if any( + isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y) + ): + def to_list(input): + return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input) + + x = to_list(x) + y = to_list(y) + # When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here. + # Otherwise, the pair origination of `are_equal` will fail, because the sequence is recognized as container + # that should be checked elementwise while the tensor is not. + elif isinstance(x, torch.Tensor) and isinstance(y, Sequence): + y = torch.as_tensor(y, dtype=x.dtype, device=x.device) + elif isinstance(x, Sequence) and isinstance(y, torch.Tensor): + x = torch.as_tensor(x, dtype=y.dtype, device=y.device) + + # unbind NSTs to compare them; don't do this for NJTs + if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.strided: + x = x.unbind() + if isinstance(y, torch.Tensor) and y.is_nested and y.layout == torch.strided: + y = y.unbind() + + error_metas = not_close_error_metas( + x, + y, + pair_types=( + NonePair, + RelaxedBooleanPair, + RelaxedNumberPair, + TensorOrArrayPair, + TypedStoragePair, + StringPair, + SetPair, + TypePair, + ObjectPair, + ), + sequence_types=( + Sequence, + Sequential, + ModuleList, + ParameterList, + ScriptList, + torch.utils.data.dataset.Subset, + ), + mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict), + rtol=rtol, + rtol_override=self.rel_tol, + atol=atol, + atol_override=self.precision, + equal_nan=equal_nan, + check_device=exact_device, + check_dtype=exact_dtype, + check_layout=exact_layout, + check_stride=exact_stride, + check_is_coalesced=exact_is_coalesced, + ) + + if error_metas: + # See [ErrorMeta Cycles] + error_metas = [error_metas] + # TODO: compose all metas into one AssertionError + raise error_metas.pop()[0].to_error( + # This emulates unittest.TestCase's behavior if a custom message passed and + # TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage) + # is True (default) + (lambda generated_msg: f"{generated_msg}\n{msg}") if isinstance(msg, str) and self.longMessage else msg + ) + + def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override] + atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None: + with self.assertRaises(AssertionError, msg=msg): + self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs) + + def assertEqualTypeString(self, x, y) -> None: + # This API is used simulate deprecated 
x.type() == y.type() + self.assertEqual(x.device, y.device) + self.assertEqual(x.dtype, y.dtype) + self.assertEqual(x.is_sparse, y.is_sparse) + + def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None: + for elem in iterable: + if id(obj) == id(elem): + return + raise AssertionError("object not found in iterable") + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaises(self, expected_exception, *args, **kwargs): + if self._ignore_not_implemented_error: + context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \ + AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg] + try: + return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr] + finally: + # see https://bugs.python.org/issue23890 + context = None + else: + return super().assertRaises(expected_exception, *args, **kwargs) + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): + # Verifies that an exception with the type expected_exception and message + # matching the regular expression defined by expected_regex is thrown. + # If the test is instantiated for a non-native device type (like XLA) + # then the message is not validated. + + # Checks whether the test is instantiated for a device type by testing + # if the test class has defined the device_type attribute and, + # if so, tests whether the instantiated device type is native or not + if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES and self.device_type != "mps": # type: ignore[attr-defined] + # empty string matches any string + expected_regex = '' + + if self._ignore_not_implemented_error: + context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg] + expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined] + else: + return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs) + + # Verifies that no unraisable exceptions are raised by callable. Unlike regular + # exceptions, these do not actually propagate to the caller and are + # suppressed. We must test for them specially. + def assertNoUnraisable(self, callable, *args, **kwargs): + raised = None + + def record_unraisable(unraisable): + nonlocal raised + raised = unraisable + + # Disable GC when running the callable to prevent spurious flakiness + # from unlucky GCs inside the callable + prev = gc.isenabled() + gc.disable() + try: + with unittest.mock.patch("sys.unraisablehook", record_unraisable): + callable(*args, **kwargs) + finally: + if prev: + gc.enable() + + self.assertIsNone(raised) + + # TODO: Support context manager interface + # NB: The kwargs forwarding to callable robs the 'subname' parameter. + # If you need it, manually apply your callable in a lambda instead. + def assertExpectedRaises(self, exc_type, callable, *args, **kwargs): + subname = None + if 'subname' in kwargs: + subname = kwargs['subname'] + del kwargs['subname'] + try: + callable(*args, **kwargs) + except exc_type as e: + self.assertExpected(str(e), subname) + return + # Don't put this in the try block; the AssertionError will catch it + self.fail(msg="Did not raise when expected to") + + def assertNotWarn(self, callable, msg=''): + r""" + Test if :attr:`callable` does not raise a warning. 
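+
+        For example (illustrative): self.assertNotWarn(lambda: torch.arange(3))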
+ """ + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + callable() + self.assertTrue(len(ws) == 0, msg) + + @contextmanager + def assertWarnsOnceRegex(self, category, regex=''): + """Context manager for code that *must always* warn + + This filters expected warnings from the test and fails if + the expected warning is not caught. It uses set_warn_always() to force + TORCH_WARN_ONCE to behave like TORCH_WARN + """ + pattern = re.compile(regex) + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + yield + if len(ws) == 0: + self.fail('no warning caught') + self.assertTrue(any(type(w.message) is category for w in ws)) + self.assertTrue( + any(re.match(pattern, str(w.message)) for w in ws), + f'{pattern}, {[w.message for w in ws if type(w.message) is category]}') + + def assertExpected(self, s, subname=None): + r""" + Test that a string matches the recorded contents of a file + derived from the name of this test and subname. This file + is placed in the 'expect' directory in the same directory + as the test script. You can automatically update the recorded test + output using --accept. + + If you call this multiple times in a single function, you must + give a unique subname each time. + """ + if not isinstance(s, str): + raise TypeError("assertExpected is strings only") + + def remove_prefix(text, prefix): + if text.startswith(prefix): + return text[len(prefix):] + return text + # NB: we take __file__ from the module that defined the test + # class, so we place the expect directory where the test script + # lives, NOT where test/common_utils.py lives. 
This doesn't matter in + # PyTorch where all test scripts are in the same directory as + # test/common_utils.py, but it matters in onnx-pytorch + module_id = self.__class__.__module__ + munged_id = remove_prefix(self.id(), module_id + ".") + test_file = os.path.realpath(sys.modules[module_id].__file__) + expected_file = os.path.join(os.path.dirname(test_file), + "expect", + munged_id) + + subname_output = "" + if subname: + expected_file += "-" + subname + subname_output = f" ({subname})" + expected_file += ".expect" + expected = None + + def accept_output(update_type): + print(f"Accepting {update_type} for {munged_id}{subname_output}:\n\n{s}") + with open(expected_file, 'w') as f: + # Adjust for producer_version, leave s unmodified + s_tag = re.sub(r'(producer_version): "[0-9.]*"', + r'\1: "CURRENT_VERSION"', s) + f.write(s_tag) + + try: + with open(expected_file) as f: + expected = f.read() + except OSError as e: + if e.errno != errno.ENOENT: + raise + elif expecttest.ACCEPT: + return accept_output("output") + else: + raise RuntimeError( + f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" + "No expect file exists; to accept the current output, run:\n" + f"python {__main__.__file__} {munged_id} --accept") from None + + # a hack for JIT tests + if IS_WINDOWS: + expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected) + s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s) + + # Adjust for producer_version + expected = expected.replace( + 'producer_version: "CURRENT_VERSION"', + f'producer_version: "{torch.onnx.producer_version}"' + ) + if expecttest.ACCEPT: + if expected != s: + return accept_output("updated output") + else: + if hasattr(self, "assertMultiLineEqual"): + # Python 2.7 only + # NB: Python considers lhs "old" and rhs "new". + self.assertMultiLineEqual(expected, s) + else: + self.assertEqual(s, expected) + + def assertExpectedStripMangled(self, s, subname=None): + s = re.sub(r'__torch__[^ ]+', '', s) + self.assertExpected(s, subname) + + def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None): + """Assert that ``first`` is greater than or almost equal to ``second``. + + The equality of ``first`` and ``second`` is determined in a similar way to + the ``assertAlmostEqual`` function of the standard library. + """ + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + + if first >= second: + return + + diff = second - first + if delta is not None: + if diff <= delta: + return + + standardMsg = f"{first} not greater than or equal to {second} within {delta} delta" + else: + if places is None: + places = 7 + + if round(diff, places) == 0: + return + + standardMsg = f"{first} not greater than or equal to {second} within {places} places" + + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertAtenOp(self, onnx_model, operator, overload_name=""): + all_aten_nodes = [p for p in onnx_model.graph.node + if p.op_type == "ATen" and p.domain == "org.pytorch.aten"] + self.assertTrue(all_aten_nodes) + + for op in all_aten_nodes: + attrs = {attr.name: attr.s.decode() for attr in op.attribute} + if attrs.get("operator") == operator: + break + + self.assertEqual(attrs["operator"], operator) + self.assertEqual(attrs.get("overload_name", ""), overload_name) + + def check_nondeterministic_alert(self, fn, caller_name, should_alert=True): + '''Checks that an operation produces a nondeterministic alert when + expected while `torch.use_deterministic_algorithms(True)` is set. 
+ + Args: + fn (callable): Function to check for a nondeterministic alert + + caller_name (str): Name of the operation that produces the + nondeterministic alert. This name is expected to appear at the + beginning of the error/warning message. + + should_alert (bool, optional): If True, then the check will only pass + if calling `fn` produces a nondeterministic error/warning with the + expected message. If False, then the check will only pass if + calling `fn` does not produce an error. Default: `True`. + ''' + + alert_message = '^' + caller_name + ' does not have a deterministic implementation, but you set' + + # Check that errors are thrown correctly + with DeterministicGuard(True): + if should_alert: + with self.assertRaisesRegex( + RuntimeError, + alert_message, + msg='expected a non-deterministic error, but it was not raised'): + fn() + + else: + # If a nondeterministic error is not expected, make sure + # that it is not raised + try: + fn() + except RuntimeError as e: + if 'does not have a deterministic implementation' in str(e): + self.fail( + 'did not expect non-deterministic error message, ' + + 'but got one anyway: "' + str(e) + '"') + # Reraise exceptions unrelated to nondeterminism + raise + + # Check that warnings are thrown correctly + with DeterministicGuard(True, warn_only=True): + if should_alert: + with self.assertWarnsRegex( + UserWarning, + alert_message): + fn() + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fn() + for warning in w: + if isinstance(warning, UserWarning): + self.assertTrue(re.search(alert_message, str(warning)) is None) + + # run code in subprocess and capture exceptions. + @staticmethod + def run_process_no_exception(code, env=None): + import subprocess + + popen = subprocess.Popen( + [sys.executable, '-c', code], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + (stdout, stderr) = popen.communicate() + return (stdout, stderr) + + # returns captured stderr + @staticmethod + def runWithPytorchAPIUsageStderr(code): + env = os.environ.copy() + env["PYTORCH_API_USAGE_STDERR"] = "1" + # remove CI flag since this is a wrapped test process. + # CI flag should be set in the parent process only. + env.pop("CI", None) + env.pop("TEST_SHOWLOCALS", None) + (stdout, stderr) = TestCase.run_process_no_exception(code, env=env) + return stderr.decode('ascii') + + +class TestCaseBase(TestCase): + # Calls to super() in dynamically created classes are a bit odd. + # See https://github.com/pytorch/pytorch/pull/118586 for more info + # Subclassing this class and then calling super(TestCaseBase) will run + # TestCase's setUp, tearDown etc functions + pass + + +def download_file(url, binary=True): + from urllib.parse import urlsplit + from urllib import request, error + + filename = os.path.basename(urlsplit(url)[2]) + data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data')) + path = os.path.join(data_dir, filename) + + if os.path.exists(path): + return path + try: + data = request.urlopen(url, timeout=15).read() + with open(path, 'wb' if binary else 'w') as f: + f.write(data) + return path + except error.URLError as e: + msg = f"could not download test file '{url}'" + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) from e + +def find_free_port(): + """ + Finds an available port and returns that port number. 
+ + NOTE: If this function is being used to allocate a port to Store (or + indirectly via init_process_group or init_rpc), it should be used + in conjuction with the `retry_on_connect_failures` decorator as there is a potential + race condition where the allocated port may become unavailable before it can be used + """ + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('localhost', 0)) + _, port = sock.getsockname() + return port + +# Errors that we can get in c10d initialization for which we should retry tests for. +ADDRESS_IN_USE = "Address already in use" +CONNECT_TIMEOUT = "connect() timed out." + +def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)): + """Reruns a test if the test returns a RuntimeError and the exception + contains one of the strings in connect_errors.""" + # This if block is executed when using this function as a decorator with arguments. + if func is None: + return partial(retry_on_connect_failures, connect_errors=connect_errors) + + @wraps(func) + def wrapper(*args, **kwargs): + n_retries = 10 + tries_remaining = n_retries + while True: + try: + return func(*args, **kwargs) + except RuntimeError as error: + if any(connect_error in str(error) for connect_error in connect_errors): + tries_remaining -= 1 + if tries_remaining == 0: + raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}") from error + time.sleep(random.random()) + continue + raise + return wrapper + + +# Decorator to retry upon certain Exceptions. +def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False): + def deco_retry(f): + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + while mtries > 1: + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) + print(msg) + time.sleep(mdelay) + mtries -= 1 + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e + return f_retry # true decorator + return deco_retry + + +# FIXME: modernize these to be consistent with make_tensor +# and review including them in torch.testing +# Methods for matrix generation + +def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'): + assert rank <= l + A = torch.randn(l, l, dtype=dtype, device=device) + u, s, vh = torch.linalg.svd(A, full_matrices=False) + for i in range(l): + if i >= rank: + s[i] = 0 + elif s[i] == 0: + s[i] = 1 + return (u * s.to(dtype).unsqueeze(-2)) @ vh + +def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001): + """ + Returns a random rectangular matrix (batch of matrices) + with singular values sampled from a Gaussian with + mean `mean` and standard deviation `sigma`. + The smaller the `sigma`, the better conditioned + the output matrix is. 
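+    The following example (illustrative) creates a 2 x 4 batch of well-conditioned 5 x 3 matrices
+    >>> # xdoctest: +SKIP("undefined variables")
+    >>> x = random_well_conditioned_matrix(2, 4, 5, 3, dtype=dtype, device=device)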
+ """ + primitive_dtype = { + torch.float: torch.float, + torch.double: torch.double, + torch.cfloat: torch.float, + torch.cdouble: torch.double + } + x = torch.rand(shape, dtype=dtype, device=device) + m = x.size(-2) + n = x.size(-1) + u, _, vh = torch.linalg.svd(x, full_matrices=False) + s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \ + .sort(-1, descending=True).values.to(dtype) + return (u * s.unsqueeze(-2)) @ vh + +# Returns a noncontiguous (tensor with the same shape and values as t +# The noncontiguous tensor is constructed such that elements in the innermost +# dimension are separated by zeros or (whenever possible) nans +# TODO: consider more complicated noncontiguity schemes +def noncontiguous_like(t): + # Short-circuits if t is already noncontiguous + if not t.is_contiguous(): + return t + + # Choose a "weird" value that won't be accessed + if t.dtype.is_floating_point or t.dtype.is_complex: + value = math.nan + elif t.dtype == torch.bool: + value = True + else: + value = 12 + + result = t.new_empty(t.shape + (2,)) + result[..., 0] = value + result[..., 1] = t.detach() + result = result[..., 1] + result.requires_grad_(t.requires_grad) + return result + +# TODO: remove this (prefer make_symmetric_matrices below) +def random_symmetric_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mT).div_(2) + return A + +# Creates a symmetric matrix or batch of symmetric matrices +# Shape must be a square matrix or batch of square matrices +def make_symmetric_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + t = (t + t.mT).div_(2) + return t + +def random_hermitian_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mH).div_(2) + return A + + +def random_symmetric_psd_matrix(l, *batches, **kwargs): + """ + Returns a batch of random symmetric positive-semi-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + return A @ A.mT + + +def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'): + """ + Returns a batch of random Hermitian positive-semi-definite matrices. 
+ The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) + return A @ A.mH + + +# TODO: remove this (prefer make_symmetric_pd_matrices below) +def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return torch.matmul(A, A.mT) \ + + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5 + + +# Creates a symmetric positive-definite matrix or batch of +# such matrices +def make_symmetric_pd_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5 + return t @ t.mT + i + +def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device): + """ + Returns a batch of random Hermitian positive-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device) + +# Creates a full rank matrix with distinct singular values or +# a batch of such matrices +def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False): + with torch.no_grad(): + t = make_tensor(shape, device=device, dtype=dtype) + u, _, vh = torch.linalg.svd(t, full_matrices=False) + real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype + k = min(shape[-1], shape[-2]) + # We choose the singular values to be "around one" + # This is to make the matrix well conditioned + # s = [2, 3, ..., k+1] + s = torch.arange(2, k + 2, dtype=real_dtype, device=device) + # s = [2, -3, 4, ..., (-1)^k k+1] + s[1::2] *= -1. + # 1 + 1/s so that the singular values are in the range [2/3, 3/2] + # This gives a condition number of 9/4, which should be good enough + s.reciprocal_().add_(1.) + # Note that the singular values need not be ordered in an SVD so + # we don't need need to sort S + x = (u * s.to(u.dtype)) @ vh + x.requires_grad_(requires_grad) + return x + +def random_matrix(rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices. 
+ + Parameters: + dtype - the data type + device - the device kind + singular - when True, the output will be singular + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + silent = kwargs.get("silent", False) + singular = kwargs.get("singular", False) + if silent and not torch._C.has_lapack: + return torch.ones(rows, columns, dtype=dtype, device=device) + + A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device) + if A.numel() == 0: + return A + u, _, vh = torch.linalg.svd(A, full_matrices=False) + k = min(rows, columns) + s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device) + if singular: + # make matrix singular + s[k - 1] = 0 + if k > 2: + # increase the order of singularity so that the pivoting + # in LU factorization will be non-trivial + s[0] = 0 + return (u * s.unsqueeze(-2)) @ vh + + +def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices with + given rank. + """ + B = random_matrix(rows, rank, *batch_dims, **kwargs) + C = random_matrix(rank, columns, *batch_dims, **kwargs) + return B.matmul(C) + + +def _generate_indices_prefer_all_rows(rows: int, cols: int, num_indices: int) -> torch.Tensor: + """Generate indices for a row x cols matrix, preferring at least one index per row if possible.""" + indices = [] + n_per_row = math.ceil(num_indices / rows) + col_indices = list(range(cols)) + + for r in range(rows): + # Note that this can yield overlapping indices + for c in random.choices(col_indices, k=n_per_row): + indices.append((r, c)) + + return torch.tensor(indices[:num_indices]) + + +def random_sparse_matrix(rows, columns, density=0.01, **kwargs): + """Return rectangular random sparse matrix within given density. + + The density of the result approaches to given density as the size + of the matrix is increased and a relatively small value of density + is specified but higher than min(rows, columns)/(rows * columns) + for non-singular matrices. + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + + nonzero_elements = max(min(rows, columns), int(rows * columns * density)) + indices = _generate_indices_prefer_all_rows(rows, columns, nonzero_elements) + values = torch.randn(nonzero_elements, dtype=dtype, device=device) + + # ensure that the diagonal dominates + values *= torch.tensor([-float(i - j)**2 for i, j in indices], dtype=dtype, device=device).exp() + A = torch.sparse_coo_tensor(indices.t(), values, (rows, columns), device=device) + return A.coalesce() + + +def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs): + """Return random sparse positive-definite matrix with given density. 
+ + The eigenvalues of the matrix are defined as:: + arange(1, matrix_size+1)/matrix_size + + Algorithm: + A = diag(arange(1, matrix_size+1)/matrix_size) + while : + + R = + A = R^T A R + """ + import math + torch = kwargs.get('torch', globals()['torch']) + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + data = {(i, i): float(i + 1) / matrix_size + for i in range(matrix_size)} + + + def multiply(data, N, i, j, cs, sn, left=True): + for k in range(N): + if left: + ik, jk = (k, i), (k, j) + else: + ik, jk = (i, k), (j, k) + aik, ajk = data.get(ik, 0), data.get(jk, 0) + aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk + if aik: + data[ik] = aik + else: + data.pop(ik, None) + if ajk: + data[jk] = ajk + else: + data.pop(jk, None) + + target_nnz = density * matrix_size * matrix_size + while len(data) < target_nnz: + i = random.randint(0, matrix_size - 1) + j = random.randint(0, matrix_size - 1) + if i != j: + theta = random.uniform(0, 2 * math.pi) + cs = math.cos(theta) + sn = math.sin(theta) + multiply(data, matrix_size, i, j, cs, sn, left=True) + multiply(data, matrix_size, i, j, cs, sn, left=False) + icoords, jcoords, values = [], [], [] + for (i, j), v in sorted(data.items()): + icoords.append(i) + jcoords.append(j) + values.append(v) + indices_tensor = torch.tensor([icoords, jcoords]) + return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device) + +# FIXME: remove this by updating test suites using it +def do_test_dtypes(self, dtypes, layout, device): + for dtype in dtypes: + if dtype != torch.float16: + out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device) + self.assertIs(dtype, out.dtype) + self.assertIs(layout, out.layout) + self.assertEqual(device, out.device) + +# FIXME: remove this by updating test suites using it +def do_test_empty_full(self, dtypes, layout, device): + shape = torch.Size([2, 3]) + + def check_value(tensor, dtype, layout, device, value, requires_grad): + self.assertEqual(shape, tensor.shape) + self.assertIs(dtype, tensor.dtype) + self.assertIs(layout, tensor.layout) + self.assertEqual(tensor.requires_grad, requires_grad) + if tensor.is_cuda and device is not None: + self.assertEqual(device, tensor.device) + if value is not None: + fill = tensor.new(shape).fill_(value) + self.assertEqual(tensor, fill) + + def get_int64_dtype(dtype): + module = '.'.join(str(dtype).split('.')[1:-1]) + if not module: + return torch.int64 + return operator.attrgetter(module)(torch).int64 + + default_dtype = torch.get_default_dtype() + check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False) + check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False) + for dtype in dtypes: + for rg in {dtype.is_floating_point, False}: + int64_dtype = get_int64_dtype(dtype) + v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg) + check_value(v, dtype, layout, device, None, rg) + out = v.new() + check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg), + dtype, layout, device, None, rg) + check_value(v.new_empty(shape), dtype, layout, device, None, False) + check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False), + int64_dtype, layout, device, None, False) + check_value(torch.empty_like(v), dtype, layout, device, None, False) + check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), + int64_dtype, layout, device, None, 
False) + + if dtype is not torch.float16 and layout != torch.sparse_coo: + fv = 3 + v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg) + check_value(v, dtype, layout, device, fv, rg) + check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False) + out = v.new() + check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg), + dtype, layout, device, fv + 2, rg) + check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 3, False) + check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False) + check_value(torch.full_like(v, fv + 5, + dtype=int64_dtype, layout=layout, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 5, False) + +# FIXME: improve load_tests() documentation here +running_script_path = None +def set_running_script_path(): + global running_script_path + try: + running_file = os.path.abspath(os.path.realpath(sys.argv[0])) + if running_file.endswith('.py'): # skip if the running file is not a script + running_script_path = running_file + except Exception: + pass + +def check_test_defined_in_running_script(test_case): + if running_script_path is None: + return + test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) + assert test_case_class_file == running_script_path, f'Class of loaded TestCase "{test_case.id()}" ' \ + f'is not defined in the running script "{running_script_path}", but in "{test_case_class_file}". Did you ' \ + "accidentally import a unittest.TestCase from another file?" + +def load_tests(loader, tests, pattern): + set_running_script_path() + test_suite = unittest.TestSuite() + for test_group in tests: + if not DISABLE_RUNNING_SCRIPT_CHK: + for test in test_group: + check_test_defined_in_running_script(test) + if test_group._tests: + test_suite.addTest(test_group) + return test_suite + +# FIXME: document this and move it to test_serialization +class BytesIOContext(io.BytesIO): + def __enter__(self): + return self + + def __exit__(self, *args): + pass + +# Tentative value for nondet_tol for gradcheck when backward implementation +# relies on nondeterministic operations, i.e., those listed here: +# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html +# +# For more information see https://github.com/pytorch/pytorch/issues/56202 +GRADCHECK_NONDET_TOL = 1e-12 + +TEST_WITH_SLOW_GRADCHECK: bool = TestEnvironment.def_flag( + "TEST_WITH_SLOW_GRADCHECK", + env_var="PYTORCH_TEST_WITH_SLOW_GRADCHECK", +) + +skipIfSlowGradcheckEnv = unittest.skipIf( + TEST_WITH_SLOW_GRADCHECK, + "Tests that don't use gradcheck don't need to run on slow_gradcheck CI", +) + + +def gradcheck(fn, inputs, **kwargs): + # Wrapper around gradcheck that enables certain keys by default. + # Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and + # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks + # to be disabled to default for the public-facing api to avoid breaking user code. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck. 
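+    # A minimal usage sketch (illustrative, inside a TestCase):
+    #   x = torch.randn(4, dtype=torch.double, requires_grad=True)
+    #   self.assertTrue(gradcheck(torch.sin, (x,)))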
+ default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradcheck(fn, inputs, **kwargs) + +def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs): + # Wrapper around gradgradcheck that enables certain keys by default + # See gradcheck above for an explanation of why we need something like this. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck + default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs) + + +def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs): + # call assert function rather than returning a bool since it's nicer + # if we get whether this failed on the gradcheck or the gradgradcheck. + test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs)) + test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs)) + + +@contextmanager +def set_cwd(path: str) -> Iterator[None]: + old_cwd = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(old_cwd) + + +# FIXME: delete this +# Using @toleranceOverride specific to your test is the recommended way +# of doing this. These are just some values that worked for test_nn. +dtype2prec_DONTUSE = {torch.float: 1e-5, + torch.double: 1e-5, + torch.half: 1e-2, + torch.bfloat16: 1e-1} + +# FIXME: move to test_sparse or sparse utils +# This is a wrapper that wraps a test to run this test twice, one with +# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors. 
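+#
+# A usage sketch (illustrative):
+#
+#   @coalescedonoff
+#   def test_foo(self, device, dtype, coalesced):
+#       ...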
+def coalescedonoff(f): + @wraps(f) + def wrapped(self, *args, **kwargs): + f(self, *args, **kwargs, coalesced=True) + f(self, *args, **kwargs, coalesced=False) + return wrapped + + +def is_coalesced_indices(s): + indices = s._indices() + hash_coeffs = (1,) + s.shape[s.sparse_dim() - 1:0:-1] + hash_indices = torch.tensor(hash_coeffs, device=s.device).cumprod(-1).flip(-1) + if s.sparse_dim() > 1: + hash_indices.unsqueeze_(-1) + hash_indices = (indices * hash_indices).sum(0) + else: + hash_indices = indices * hash_indices + + # check if indices are sorted + res = torch.allclose(hash_indices, hash_indices.sort()[0]) + + # check if there are no repeated indices + res = res and torch.allclose(hash_indices, hash_indices.unique()) + + return res + + +@contextlib.contextmanager +def disable_gc(): + if gc.isenabled(): + try: + gc.disable() + yield + finally: + gc.enable() + else: + yield + + +def find_library_location(lib_name: str) -> Path: + # return the shared library file in the installed folder if exist, + # else the file in the build folder + torch_root = Path(torch.__file__).resolve().parent + path = torch_root / 'lib' / lib_name + if os.path.exists(path): + return path + torch_root = Path(__file__).resolve().parent.parent.parent + return torch_root / 'build' / 'lib' / lib_name + +def skip_but_pass_in_sandcastle(reason): + """ + Similar to unittest.skip, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if not IS_SANDCASTLE: + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + return func + + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return + return wrapper + + return decorator + +def mock_wrapper(method): + """ + Returns a function that calls the real implementation of a method + in addition to passing args to a mock object. + """ + mock = MagicMock() + + @wraps(method) + def wrapper(self, *args, **kwargs): + mock(*args, **kwargs) + return method(self, *args, **kwargs) + wrapper.mock = mock # type: ignore[attr-defined] + return wrapper + +def get_tensors_from(args, kwargs): + """ Returns a set of all Tensor objects in the given args and kwargs. 
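+
+    Example (illustrative): for Tensors t and u,
+    get_tensors_from((t, 1, "s"), {"out": u}) returns {t, u}.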
""" + return set([arg for arg in args if isinstance(arg, Tensor)] + + [v for v in kwargs.values() if isinstance(v, Tensor)]) + + +# Returns scalar tensor representation of a list of integer byte values +def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device): + dtype_to_ctype: Dict[torch.dtype, Any] = { + torch.int8: ctypes.c_int8, + torch.uint8: ctypes.c_uint8, + torch.uint16: ctypes.c_uint16, + torch.uint32: ctypes.c_uint32, + torch.uint64: ctypes.c_uint64, + torch.int16: ctypes.c_int16, + torch.int32: ctypes.c_int32, + torch.int64: ctypes.c_int64, + torch.bool: ctypes.c_bool, + torch.float32: ctypes.c_float, + torch.complex64: ctypes.c_float, + torch.float64: ctypes.c_double, + torch.complex128: ctypes.c_double, + } + ctype = dtype_to_ctype[dtype] + num_bytes = ctypes.sizeof(ctype) + + def check_bytes(byte_list): + for byte in byte_list: + assert 0 <= byte <= 255 + + if dtype.is_complex: + assert len(byte_list) == (num_bytes * 2) + check_bytes(byte_list) + real = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[:num_bytes])).value + imag = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[num_bytes:])).value + res = real + 1j * imag + else: + assert len(byte_list) == num_bytes + check_bytes(byte_list) + res = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list)).value + + return torch.tensor(res, device=device, dtype=dtype) + + +def copy_func(f): + """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)""" + g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, + argdefs=f.__defaults__, + closure=f.__closure__) + g = functools.update_wrapper(g, f) + g.__kwdefaults__ = f.__kwdefaults__ + return g + + +def xfail_inherited_tests(tests): + """ + Given a list of test names which are defined by a superclass of the + class this decorates, mark them as expected failure. This is useful + if you are doing poor man's parameterized tests by subclassing a generic + test class. + """ + def deco(cls): + for t in tests: + # NB: expectedFailure operates by mutating the method in question, + # which is why you have to copy the function first + setattr(cls, t, unittest.expectedFailure(copy_func(getattr(cls, t)))) + return cls + return deco + + +def skip_but_pass_in_sandcastle_if(condition, reason): + """ + Similar to unittest.skipIf, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if condition: + if IS_SANDCASTLE: + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return wrapper + else: + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + + return func + + return decorator + +def dtype_name(dtype): + """ Returns the pretty name of the dtype (e.g. torch.int64 -> int64). 
""" + return str(dtype).split('.')[1] + + +dtype_abbrs = { + torch.bfloat16: 'bf16', + torch.float64: 'f64', + torch.float32: 'f32', + torch.float16: 'f16', + torch.complex32: 'c32', + torch.complex64: 'c64', + torch.complex128: 'c128', + torch.int8: 'i8', + torch.int16: 'i16', + torch.int32: 'i32', + torch.int64: 'i64', + torch.bool: 'b8', + torch.uint8: 'u8', +} + + +@functools.lru_cache +def get_cycles_per_ms() -> float: + """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep + """ + + def measure() -> float: + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record() + torch.cuda._sleep(1000000) + end.record() + end.synchronize() + cycles_per_ms = 1000000 / start.elapsed_time(end) + return cycles_per_ms + + # Get 10 values and remove the 2 max and 2 min and return the avg. + # This is to avoid system disturbance that skew the results, e.g. + # the very first cuda call likely does a bunch of init, which takes + # much longer than subsequent calls. + # + # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs + # and seems to return stable values. Therefore, we enable caching + # using lru_cache decorator above. + num = 10 + vals = [] + for _ in range(num): + vals.append(measure()) + vals = sorted(vals) + return mean(vals[2 : num - 2]) + + +# OpInfo utils + +T = TypeVar('T') +def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T: + """ + Returns the first sample from an iterable of samples, like those returned by OpInfo. + The test will be skipped if no samples are available. + """ + try: + return next(iter(samples)) + except StopIteration as e: + raise unittest.SkipTest('Skipped! Need at least 1 sample input') from e + +# this helper method is to recursively +# clone the tensor-type input of operators tested by OpInfo +def clone_input_helper(input): + if isinstance(input, torch.Tensor): + return torch.clone(input) + + if isinstance(input, Sequence): + return tuple(map(clone_input_helper, input)) + + return input + +@contextmanager +def custom_op(opname, symbolic_fn, opset_version): + """Context manager/decorator to test ONNX export with custom operator""" + try: + register_custom_op_symbolic(opname, symbolic_fn, opset_version) + yield + finally: + unregister_custom_op_symbolic(opname, opset_version) + + +def outs_and_grads(fn, graph_inps, inps): + outs = fn(*graph_inps) + for out in pytree.tree_leaves(outs): + if isinstance(out, torch.Tensor) and out.requires_grad: + out.sum().backward(retain_graph=True) + grads = [inp.grad for inp in pytree.tree_leaves(inps) if isinstance(inp, torch.Tensor)] + for inp in pytree.tree_leaves(inps): + if isinstance(inp, torch.Tensor): + inp.grad = None + return outs, grads + +def compare_equal_outs_and_grads(test, m1, m2, inps): + r1, g1 = outs_and_grads(m1, inps, inps) + r2, g2 = outs_and_grads(m2, inps, inps) + test.assertEqual(r1, r2) + test.assertEqual(g1, g2) + +class TestGradients(TestCase): + exact_dtype = True + + # Copies inputs to inplace operations to avoid inplace modifications + # to leaves requiring gradient + def _get_safe_inplace(self, inplace_variant): + @wraps(inplace_variant) + def _fn(t, *args, **kwargs): + return inplace_variant(t.clone(), *args, **kwargs) + + return _fn + + def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad') + # NB: 
check_backward_ad does not affect gradgradcheck (always True) + if variant is None: + self.skipTest("Skipped! Variant not implemented.") + if not op.supports_dtype(dtype, torch.device(device).type): + self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}") + + def is_inplace(variant): + if hasattr(variant, "__wrapped__"): + return variant.__wrapped__ is op.get_inplace() + return variant is op.get_inplace() + + include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex + + samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs, + small_inputs_only=TEST_WITH_SLOW_GRADCHECK) + + for sample in samples: + if sample.broadcasts_input and is_inplace(variant): + continue + + # Gradcheck expects tensors as its input, but autograd actually supports tensorlists + # and tensors passed as kwargs. The following creates a function that accepts just + # the tensors that require grad as varargs, and then recomposes them back into the + # original input. + + # Creates gradcheck inputs by identifying tensors requiring grad + all_args = None + if is_iterable_of_tensors(sample.input): + all_args = chain(sample.input, sample.args, sample.kwargs.values()) + else: + all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values())) + gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad)) + + # Verifies sample input tensors should have no grad + # This may happen if the same tensor is used in two different SampleInputs + for t in gradcheck_args: + self.assertIsNone(t.grad, + "A sampled input has a gradient before running autograd. " + "This usually means that (at least) one input tensor is reused " + "across different SampleInputs. 
" + "Please create a new tensor for each SampleInput.") + + def _input_recomposition_helper(inputs, inp, input_idx): + if is_iterable_of_tensors(inp): + tensor_list = [] + for x in inp: + if isinstance(x, torch.Tensor) and x.requires_grad: + tensor_list.append(inputs[input_idx]) + input_idx = input_idx + 1 + else: + tensor_list.append(x) + return tensor_list, input_idx + elif isinstance(inp, torch.Tensor) and inp.requires_grad: + return inputs[input_idx], input_idx + 1 + else: + return inp, input_idx + + def fn(*inputs): + # Puts inputs back into sample properly + positional_args = [] + input_idx = 0 + inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx) + positional_args.append(inp) + + for x in sample.args: + inp, input_idx = _input_recomposition_helper(inputs, x, input_idx) + positional_args.append(inp) + + # Recreates kwargs + kwargs = {} + for k, v in sample.kwargs.items(): + inp, input_idx = _input_recomposition_helper(inputs, v, input_idx) + kwargs[k] = inp + + output = op.gradcheck_wrapper(variant, *positional_args, **kwargs) + if sample.output_process_fn_grad is not None: + return sample.output_process_fn_grad(output) + return output + + if check == 'gradcheck': + if check_batched_grad is None: + check_batched_grad = op.check_batched_grad + self.assertTrue(gradcheck(fn, gradcheck_args, + check_batched_grad=check_batched_grad, + check_grad_dtypes=True, + nondet_tol=op.gradcheck_nondet_tol, + fast_mode=op.gradcheck_fast_mode, + check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, + check_undefined_grad=True, + check_batched_forward_grad=check_batched_forward_grad)) + elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'): # gradgrad check + self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck") + for gen_non_contig_grad_outputs in (False, True): + kwargs = { + "gen_non_contig_grad_outputs": gen_non_contig_grad_outputs, + "check_batched_grad": op.check_batched_gradgrad, + "check_grad_dtypes": True, + "nondet_tol": op.gradcheck_nondet_tol, + "fast_mode": op.gradcheck_fast_mode + } + if check == "fwgrad_bwgrad": + kwargs["check_fwd_over_rev"] = True + kwargs["check_rev_over_rev"] = False + kwargs["check_batched_grad"] = False + kwargs["check_undefined_grad"] = False + + self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs)) + else: + self.assertTrue(False, msg="Unknown check requested!") + + def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad, + check_batched_forward_grad=check_batched_forward_grad) + + def _skip_helper(self, op, device, dtype): + if dtype not in op.supported_backward_dtypes(torch.device(device).type): + self.skipTest("Skipped! Op doesn't support autograd for this dtype.") + if not op.supports_autograd and not op.supports_forward_ad: + self.skipTest("Skipped! 
autograd not supported.") + +def make_lazy_class(cls): + + def lazy_init(self, cb): + self._cb = cb + self._value = None + + cls.__init__ = lazy_init + + for basename in [ + "add", "sub", "mul", "truediv", "floordiv", "mod", "divmod", "pow", + "lshift", "rshift", "and", "or", "xor", "neg", "pos", "abs", "invert", + "eq", "ne", "lt", "le", "gt", "ge", "bool", "int", "index", + ]: + name = f"__{basename}__" + + def inner_wrapper(name): + use_operator = basename not in ("bool", "int") + + def wrapped(self, *args, **kwargs): + if self._cb is not None: + self._value = self._cb() + self._cb = None + if not use_operator: + return getattr(self._value, name)(*args, **kwargs) + else: + return getattr(operator, name)(self._value, *args, **kwargs) + return wrapped + + setattr(cls, name, inner_wrapper(name)) + + return cls + + +# Base TestCase for NT tests; used to define common helpers, etc. +class NestedTensorTestCase(TestCase): + def assertEqualIgnoringNestedInts(self, a, b): + # unbinding NJTs allows us to compare them as essentially equal without + # caring about exact nested int comparison + def _unbind_njts(x): + if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.jagged: + return x.unbind() + else: + return x + + self.assertEqual(pytree.tree_map(_unbind_njts, a), pytree.tree_map(_unbind_njts, b)) + + @contextlib.contextmanager + def branch_nested_state(self): + """Context manager to branch and restore the nested tensor state.""" + nested_tensor_module = torch.nested._internal.nested_tensor + original_tensor_symint_registry = nested_tensor_module._tensor_symint_registry.copy() + original_tensor_id_counter = nested_tensor_module._tensor_id_counter + try: + yield + finally: + nested_tensor_module._tensor_id_counter = original_tensor_id_counter + nested_tensor_module._tensor_symint_registry = original_tensor_symint_registry + + +@make_lazy_class +class LazyVal: + pass + + +def munge_exc(e, *, suppress_suffix=True, suppress_prefix=True, file=None, skip=0): + if file is None: + file = inspect.stack()[1 + skip].filename # skip one frame + + file = _as_posix_path(file) + s = _as_posix_path(str(e)) + + # Remove everything that looks like stack frames in NOT this file + def repl_frame(m): + if m.group(1) != file: + return "" + # Don't accept top-level, even for this script, these will wobble + # depending on how the testing script was invoked + if m.group(2) == "": + return "" + + return m.group(0) + + s = re.sub(r' File "([^"]+)", line \d+, in (.+)\n( .+\n( +[~^]+ *\n)?)+', repl_frame, s) + s = re.sub(r"line \d+", "line N", s) + s = re.sub(r".py:\d+", ".py:N", s) + s = re.sub(file, _as_posix_path(os.path.basename(file)), s) + s = re.sub(_as_posix_path(os.path.join(os.path.dirname(torch.__file__), "")), "", s) + if suppress_suffix: + s = re.sub(r"\n*Set TORCH_LOGS.+", "", s, flags=re.DOTALL) + s = re.sub(r"\n*You can suppress this exception.+", "", s, flags=re.DOTALL) + if suppress_prefix: + s = re.sub(r"Cannot export model.+\n\n", "", s) + s = re.sub(r" +$", "", s, flags=re.MULTILINE) + return s diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc 
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..908dd133a148bcef9dfd8dcd662c1eaff4f0f117 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf36a0f5aa487b8112e1088d3c33f063b93bbcbe Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/checkpoint_utils.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8adc839f2bd37382e5cc5706ff7c2b7729f48738 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/common_state_dict.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..080d376b6947350beb1fabaa18ea4379e0d0f635 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_utils.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..481f9207e4589721bda6e22b2afd51987fe34b47 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/fake_pg.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f25d5315e08ca109e3845d33ddf2e6d9457dafd Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/multi_threaded_pg.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57016a0daaa361a54fecd040a7a44e3f32f24b97 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/rpc_utils.cpython-310.pyc differ diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..acc7005c6b9e3d64d1ca50714839b0732d41b5a5 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py @@ -0,0 +1 @@ +# mypy: allow-untyped-defs diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..303d06063fda26d56bf999b3041f779492f16af3 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py @@ -0,0 +1,98 @@ +# mypy: allow-untyped-defs + +import sys +from functools import wraps, partial + +import torch +import torch.distributed as dist +from torch.distributed import rpc +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + tp_transports, +) + +TEST_GPU_NUM = 4 + +class ShardedTensorTestBase(MultiProcessTestCase): + @property + def world_size(self): + return TEST_GPU_NUM + + def init_pg(self, backend="nccl"): + if backend not in ["nccl", "gloo", "mpi"]: + raise RuntimeError(f"Backend {backend} not supported!") + + dist.init_process_group( + backend=backend, + world_size=self.world_size, + rank=self.rank, + init_method=f"file://{self.file_name}", + ) + + # set device for nccl pg for collectives + if backend == "nccl": + torch.cuda.set_device(self.rank) + + + def init_rpc(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports()) + rpc_backend_options.init_method = f"file://{self.file_name}" + for rank in range(self.world_size): + rpc_backend_options.set_device_map( + f"worker{rank}", {rank: self.rank, self.rank: rank} + ) + + rpc.init_rpc( + name="worker%d" % self.rank, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + def init_comms(self, init_rpc=True, backend="nccl"): + if init_rpc: + self.init_rpc() + self.init_pg(backend=backend) + + def destroy_comms(self, destroy_rpc=True): + # Wait for all ranks to reach here before starting shutdown. 
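+        # Without this barrier, a fast rank could call rpc.shutdown() /
+        # destroy_process_group() while slower ranks are still communicating.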
+ dist.barrier() + + if destroy_rpc: + rpc.shutdown() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + def assert_sharded_tensor_equal(self, st1, st2): + st1_local_shards = st1.local_shards() + st2_local_shards = st2.local_shards() + self.assertEqual(len(st1_local_shards), len(st2_local_shards)) + for i, st1_local_shard in enumerate(st1_local_shards): + self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor) + self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata) + + self.assertEqual(st1.metadata(), st2.metadata()) + self.assertEqual(st1.sharding_spec(), st2.sharding_spec()) + self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards())) + +# wrapper to initialize comms (processgroup + rpc) +def with_comms(func=None, init_rpc=True, backend="nccl"): + if func is None: + return partial( + with_comms, + init_rpc=init_rpc, + backend=backend, + ) + + @wraps(func) + def wrapper(self, *args, **kwargs): + if backend == "nccl" and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + self.init_comms(init_rpc=init_rpc, backend=backend) + func(self, *args, **kwargs) + self.destroy_comms(destroy_rpc=init_rpc) + return wrapper diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97feae29303d28054776ea9885e2c8c19c25d970 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2cb5e4c558dfba204e7e48bf325ea955495952c Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_st_common.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py new file mode 100644 index 0000000000000000000000000000000000000000..398b2fd8a36aa2b43e67ae0d161ab0df1c1d51d4 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py @@ -0,0 +1,136 @@ +# mypy: allow-untyped-defs + +import builtins + +import torch +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, + EnumerableShardingSpec, + ShardMetadata, +) +from torch.distributed._shard.sharding_spec._internals import ( + get_chunked_dim_size, + get_split_size, +) + + +def generate_chunk_sharding_specs_for_test(sharding_dim): + return [ + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ), + # Test different ordering. 
(Case 1) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:2/cuda:2", + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ), + # Test different ordering. (Case 2) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + ], + ), + ] + + +def generate_enumerable_sharding_specs_for_test(): + return [ + EnumerableShardingSpec( + [ + ShardMetadata( + shard_offsets=[0, 0], + shard_sizes=[5, 5], + placement="rank:0/cuda:0", + ), + ShardMetadata( + shard_offsets=[5, 0], + shard_sizes=[5, 5], + placement="rank:1/cuda:1", + ), + ShardMetadata( + shard_offsets=[0, 5], + shard_sizes=[5, 5], + placement="rank:2/cuda:2", + ), + ShardMetadata( + shard_offsets=[5, 5], + shard_sizes=[5, 5], + placement="rank:3/cuda:3", + ), + ] + ) + ] + + +def generate_local_weight_sharding_params_for_test( + local_weight, sharded_dim, gpu_num, spec, rank +): + """ + Shard the local weight based the given spec, so we can compare against + the one from sharded tensor. + + Args: + local_weight: weight matrix to be sharded. + sharded_dim: The dimension which we shard on. + gpu_num: number of ranks. + spec: sharding spec. + rank: # of cuda process. + + Returns: + start_pos: start position of sharded weight on the given rank. + chunk_size: chunk size of sharded weight on the given rank. + """ + sharding_dim_size = local_weight.size(sharded_dim) + split_size = get_split_size(sharding_dim_size, gpu_num) + current_offsets = 0 + start_pos = current_offsets + for idx, placement in enumerate(spec.placements): + chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + if rank == placement.rank(): + start_pos = current_offsets + break + current_offsets += chunk_size + return start_pos, chunk_size + + +def clone_module_parameter(module, param_name): + """ + Clone a parameter from a given existing module. + + Args: + module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned. + param_name (str): Name of the parameter of ``module`` that needs to be cloned. + + Returns: cloned tensor as :class:`torch.nn.Parameter`. 
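+
+    Example (illustrative):
+        cloned_weight = clone_module_parameter(torch.nn.Linear(4, 4), "weight")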
+ """ + tensor = getattr(module, param_name) + return torch.nn.Parameter(tensor.detach().clone()) + +def gen_binary_op_func(python_op, inplace=False): + src_lines = ['def f(lhs, rhs):'] + if "torch" in python_op: + src_lines.append(f' return {python_op}(lhs, rhs)\n') + elif inplace: + src_lines.append(f' lhs {python_op}= rhs\n return lhs\n') + else: + src_lines.append(f' return lhs {python_op} rhs\n') + + code_str = '\n'.join(src_lines) + g = {'torch': torch} + builtins.exec(code_str, g) + return g["f"] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e7a23b6f52d9ae34fb50f70e117c23ae1c81bf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py @@ -0,0 +1,66 @@ +# mypy: allow-untyped-defs + +import copy +import random +import torch +from torch.distributed._shard import sharded_tensor + +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, +) + +PLACEMENTS = [ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", +] + +DEFAULT_GPU_NUM = 4 + + +def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0): + spec_list = [] + for i in range(len(sharding_dims)): + random.Random(seed + i).shuffle(PLACEMENTS) + spec_list.append( + ChunkShardingSpec( + dim=sharding_dims[i], + placements=copy.deepcopy(PLACEMENTS), + ) + ) + return spec_list + +class MyShardedModel2(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor2 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor2 = None + self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2)) + + +class MyShardedModel1(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor1 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor1 = None + self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2)) + self.submodule = MyShardedModel2(spec, group, init_rrefs) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..26bb19dfd68e2fced32337ecabf58a79cddf46e2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py @@ -0,0 +1,42 @@ +# mypy: allow-untyped-defs + +import torch +import torch.nn as nn + +from torch.distributed._shard.sharded_tensor import ShardedTensor + + +class SimpleMegatronLM(nn.Module): + def __init__(self, linear_size, rank=None, dtype=torch.float32): + super().__init__() + self.fc1 = nn.Linear(*linear_size[0], dtype=dtype) + self.gelu = nn.GELU() + self.fc2 = nn.Linear(*linear_size[1], dtype=dtype) + if rank is not None: + self.fc1.cuda(rank) + self.fc2.cuda(rank) + + def forward(self, inp): + return self.fc2(self.gelu(self.fc1(inp))) + + def get_weights(self): + if isinstance(self.fc1.weight, ShardedTensor): + weight1 = 
self.fc1.weight.local_tensor() + else: + weight1 = self.fc1.weight + + if isinstance(self.fc2.weight, ShardedTensor): + weight2 = self.fc2.weight.local_tensor() + else: + weight2 = self.fc2.weight + + return (weight1, weight2) + + def get_biases(self): + return (self.fc1.bias, self.fc2.bias) + + def get_weight_grads(self): + return (self.fc1.weight.grad, self.fc2.weight.grad) + + def get_bias_grads(self): + return (self.fc1.bias.grad, self.fc2.bias.grad) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py new file mode 100644 index 0000000000000000000000000000000000000000..8514be7979190ced204c2286ec7fac454a63d78e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/common_dtensor.py @@ -0,0 +1,548 @@ +# mypy: allow-untyped-defs + +# Copyright (c) Meta Platforms, Inc. and affiliates + +import itertools +import sys +from dataclasses import dataclass +from functools import wraps +from typing import Any, Callable, cast, Dict, Iterator, List, Sequence, Tuple, TypeVar + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard +from torch.distributed._tensor.placement_types import Placement +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + parallelize_module, + PrepareModuleInput, + RowwiseParallel, + SequenceParallel, +) +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + skip_if_lt_x_gpu, + run_subtests, + TEST_SKIPS, +) + +from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec + +DEVICE_TYPE = ( + "cuda" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else "cpu" +) + +NUM_DEVICES = 4 + +# We use this as a proxy for "multiple GPUs exist" +if torch.cuda.is_available() and torch.cuda.device_count() > 1: + # when we actually have multiple GPUs, relax the requirement to smaller counts. 
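+    # For example, on a machine with exactly 2 visible GPUs this lowers
+    # NUM_DEVICES from 4 to 2.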
+ NUM_DEVICES = min(NUM_DEVICES, torch.cuda.device_count()) + +T = TypeVar("T") + + +# simple RMSNorm layer for testing +class RMSNormPython(torch.nn.Module): + def __init__(self, dim: int, eps: float = 1e-6): + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.ones(dim)) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x) + return output * self.weight + + +class MLPModule(nn.Module): + def __init__(self, device, bias: bool = True): + super().__init__() + torch.manual_seed(5) + self.net1 = nn.Linear(10, 16, bias=bias, device=device) + self.relu = nn.ReLU() + self.net2 = nn.Linear(16, 10, bias=bias, device=device) + + def forward(self, x): + return self.net2(self.relu(self.net1(x))) + + def reset_parameters(self): + self.net1.reset_parameters() + self.net2.reset_parameters() + + +class MLPStacked(nn.Module): + def __init__(self, device, n_layers: int = 2): + super().__init__() + self.layers = nn.ModuleList([MLPModule(device) for i in range(n_layers)]) + + def forward(self, x): + for layer in self.layers: + x = layer(x) + return x + + +@dataclass +class ModelArgs: + n_layers: int = 2 + vocab_size: int = 8 + max_seq_len: int = 16 + dim: int = 16 + n_heads: int = 4 + dropout_p: float = 0.1 + use_attn_mask: bool = True + weight_tying: bool = True + checkpoint_activations: bool = False + + +class Attention(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.dim % args.n_heads == 0 + self.head_dim = args.dim // args.n_heads + self.n_heads = args.n_heads + self.dropout_p = args.dropout_p + self.resid_dropout = nn.Dropout(args.dropout_p) + self.use_attn_mask = args.use_attn_mask + + self.wq = nn.Linear(args.dim, args.dim, bias=False) + self.wk = nn.Linear(args.dim, args.dim, bias=False) + self.wv = nn.Linear(args.dim, args.dim, bias=False) + self.wo = nn.Linear(args.dim, args.dim, bias=False) + + def forward(self, x): + bsz, seq_len, _ = x.size() + queries, keys, values = self.wq(x), self.wk(x), self.wv(x) + queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim) + keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim) + values = values.view(bsz, seq_len, self.n_heads, self.head_dim) + + queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) + + output = F.scaled_dot_product_attention( + queries, + keys, + values, + None, + self.dropout_p if self.training else 0, + self.use_attn_mask, + ) + output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1) + return self.resid_dropout(self.wo(output)) + + +class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout_p): + super().__init__() + self.w1 = nn.Linear(dim, hidden_dim) + self.gelu = nn.GELU() + self.w2 = nn.Linear(hidden_dim, dim) + self.resid_dropout = nn.Dropout(dropout_p) + + def forward(self, x): + return self.resid_dropout(self.w2(self.gelu(self.w1(x)))) + + +class TransformerBlock(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.attention_norm = nn.LayerNorm(args.dim) + self.attention = Attention(args) + self.ffn_norm = nn.LayerNorm(args.dim) + self.feed_forward = FeedForward( + args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p + ) + + def forward(self, x): + h = x + self.attention(self.attention_norm(x)) + out = h + self.feed_forward(self.ffn_norm(h)) + return out 
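+
+# Illustrative sketch (shapes only; the input below is hypothetical): with the
+# default ModelArgs (dim=16, n_heads=4), a single block maps a
+# (bsz, seq_len, dim) activation to the same shape, e.g.
+#
+#     block = TransformerBlock(ModelArgs())
+#     y = block(torch.randn(2, 16, 16))   # y.shape == (2, 16, 16)
+#
+# which is the contract the toy Transformer below builds on.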
+ + +# A toy transformer model, partly inspired by the nanoGPT model: +# https://github.com/karpathy/nanoGPT. +class Transformer(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + assert args.vocab_size is not None + assert args.max_seq_len is not None + self.model_args = args + self.max_seq_len = args.max_seq_len + self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) + self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim) + self.dropout = nn.Dropout(args.dropout_p) + self.layers = nn.ModuleList() + for _ in range(args.n_layers): + self.layers.append(TransformerBlock(args)) + self.norm = nn.LayerNorm(args.dim) + self.output = nn.Linear(args.dim, args.vocab_size, bias=False) + if args.weight_tying: + self.output.weight = self.tok_embeddings.weight + self.checkpoint_activations = args.checkpoint_activations + + def forward(self, tokens): + _bsz, seq_len = tokens.size() + assert seq_len <= self.max_seq_len + h = self.tok_embeddings(tokens) + pos = torch.arange(0, seq_len, device=tokens.device) + p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim) + h = h + p + h = self.dropout(h) + for layer in self.layers: + if self.checkpoint_activations: + h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False) + else: + h = layer(h) + h = self.norm(h) + output = self.output(h).float() + return output + + @staticmethod + def parallelize( + module: "Transformer", device_mesh: DeviceMesh, use_seq_parallel: bool, local_output_for_attn: bool = False + ) -> nn.Module: + assert isinstance(module, Transformer), f"Requires Transformer but got {module}" + # Parallelize the root submodules. + if use_seq_parallel: + root_plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)), + "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)), + "norm": SequenceParallel(), + } + else: + root_plan = { + "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), + "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), + } + + module_tp = parallelize_module(module, device_mesh, root_plan) + # Parallelize the attention and feed forward submodules. + for layer in module_tp.layers: + layer_parallelize_plan = {} + if use_seq_parallel: + layer_parallelize_plan["attention"] = PrepareModuleInput( + input_layouts=Shard(1), + desired_input_layouts=Replicate(), + ) + # shard the RMSNorms + layer_parallelize_plan["attention_norm"] = SequenceParallel() + layer_parallelize_plan["ffn_norm"] = SequenceParallel() + layer_parallelize_plan["attention.wq"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wk"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wv"] = ColwiseParallel(use_local_output=local_output_for_attn) + layer_parallelize_plan["attention.wo"] = ( + RowwiseParallel(output_layouts=Shard(1)) + if use_seq_parallel + else RowwiseParallel() + ) + + layer_parallelize_plan["feed_forward.w1"] = ( + ColwiseParallel(input_layouts=Shard(1)) + if use_seq_parallel + else ColwiseParallel() + ) + layer_parallelize_plan["feed_forward.w2"] = ( + RowwiseParallel(output_layouts=Shard(1)) + if use_seq_parallel + else RowwiseParallel() + ) + + parallelize_module(layer, device_mesh, layer_parallelize_plan) + + # Parallelize the output submodule. 
If weight tying is enabled, we need to + # make sure output.weight is sharded consistently as tok_embeddings.weight, + # at the cost of the all_reduce operation using RowwiseParallel. + output_parallelize_plan = ( + ColwiseParallel( + input_layouts=Shard(1), + output_layouts=Replicate(), + ) + if use_seq_parallel + else ColwiseParallel(output_layouts=Replicate()) + ) + parallelize_module(module_tp.output, device_mesh, output_parallelize_plan) + + if local_output_for_attn: + for layer in module_tp.layers: + layer.attention.n_heads = module_tp.model_args.n_heads // device_mesh.size() + + # Manually set output.weight so that parameters and gradients are shared. + if module_tp.model_args.weight_tying: + module_tp.output.weight = module_tp.tok_embeddings.weight + + return module_tp + + +def skip_unless_torch_gpu(method: T) -> T: + """ + Test decorator which skips the test unless there's a GPU available to torch. + + >>> # xdoctest: +SKIP + >>> @skip_unless_torch_gpu + >>> def test_some_method(self) -> None: + >>> ... + """ + # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set. + return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method)) + + +class DTensorTestBase(MultiProcessTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def backend(self) -> str: + backend = "nccl" if self.device_type == "cuda" else "gloo" + return backend + + def build_device_mesh(self) -> DeviceMesh: + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def init_pg(self) -> None: + if "nccl" in self.backend and torch.cuda.device_count() < self.world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + + if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl"]: + raise RuntimeError(f"Backend {self.backend} not supported!") + + dist.init_process_group( + backend=self.backend, + world_size=self.world_size, + rank=self.rank, # pyre-ignore[16] + init_method=f"file://{self.file_name}", # pyre-ignore[16] + ) + + # set device for nccl pg for collectives + if "nccl" in self.backend: + torch.cuda.set_device(self.rank) + + def destroy_pg(self) -> None: + # Wait for all ranks to reach here before starting shutdown. + # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895 + # dist.all_reduce(torch.zeros((1,), device="cuda" if torch.cuda.is_available() else "cpu")) + # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs: + # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion + dist.barrier() + dist.destroy_process_group() + + def setUp(self) -> None: + super().setUp() + self._spawn_processes() + + # pyre-ignore[2]: + def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None: + out = op_call(*args, **kwargs) + dtc = DTensorConverter(mesh, args, kwargs) + for d_args, d_kwargs in dtc: + # pyre can't find assertTrue anymore? 
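+            # so assertEqual(..., True) is used below as an equivalent check.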
+ self.assertEqual(dtc.successful(), True) + d_out = op_call(*d_args, **d_kwargs) + self.assertEqual(d_out.full_tensor(), out) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + +TestFunc = Callable[[object], object] + + +# wrapper to initialize comms (processgroup) +def with_comms(func: TestFunc) -> TestFunc: + assert func is not None + + @wraps(func) # pyre-ignore[6] + def wrapper( + self, *args: Tuple[object], **kwargs: Dict[str, Any] # type: ignore[misc] + ) -> None: + # if enough GPU we can use GPU, otherwise we fallback to CPU + if not torch.cuda.is_available() or torch.cuda.device_count() < self.world_size: + self.device_type = "cpu" + else: + self.device_type = DEVICE_TYPE + + self.init_pg() + + try: + func(self, *args, **kwargs) # type: ignore[misc] + except Exception as e: + dist.destroy_process_group() + raise e + + self.destroy_pg() + + return wrapper + + +class DTensorOpTestBase(MultiThreadedTestCase): + @property + def world_size(self) -> int: + return NUM_DEVICES + + @property + def device_type(self) -> str: + return DEVICE_TYPE + + def build_device_mesh(self): + return DeviceMesh(self.device_type, list(range(self.world_size))) + + def setUp(self) -> None: + super().setUp() + self._spawn_threads() + + +# This is a class for converting args/kwargs of an op into distributed args/kwargs +class DTensorConverter: + def __init__( + self, + mesh: DeviceMesh, + args: Tuple[object, ...], + kwargs: Dict[str, object], + ) -> None: + self.hit = 0 + self.miss = 0 + self.mesh = mesh + self.args = args + self.kwargs = kwargs + flatten_args, flatten_args_spec = tree_flatten(args) + flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs) + + self.flatten_args: List[object] = flatten_args + self.flatten_args_spec: TreeSpec = flatten_args_spec + self.flatten_kwargs: List[object] = flatten_kwargs + self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec + + choices_for_args = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + choices_for_args.append(self.gen_sharding_choices_for_arg(arg)) + + self.sharding_combs: Iterator[Sequence[Placement]] = iter( + itertools.product(*choices_for_args) + ) + + def successful(self) -> bool: + return self.hit > 0 and self.miss == 0 + + def is_supported_tensor(self, t: torch.Tensor) -> bool: + # TODO: dist tensor need to support quantized and sparse + # tensors, quantized tensor might be relatively easy, but + # sparse tensor have special layouts that we need to possibly + # deal with, until we are clear about them, we don't officially + # support them. 
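+        #
+        # Net effect: only plain dense tensors (not sparse, quantized, nested,
+        # functionalized, conjugated/negated, or on lazy/meta devices) are
+        # treated as convertible to DTensor here.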
+ return not any( + [ + t.is_sparse_csr, + t.is_sparse, + t.is_mkldnn, + t.is_quantized, + t.is_nested, + torch._is_functional_tensor(t), + t.is_neg(), + t.is_conj(), + t.device.type in ("lazy", "meta"), + # We need a way to test if a tensor is batched but there + # is no official APi to do it + # torch._C._is_batched(t), + ] + ) + + def gen_sharding_choices_for_arg(self, arg: torch.Tensor) -> Sequence[Placement]: + mesh_size = self.mesh.size() + sharding_choices: List[Placement] = [Replicate()] + # c10d collective does not support bool tensor + # for bool tensor we treat it as replicated + if arg.dtype != torch.bool: + # only generating choices with: replicate, or sharding + # evenly on a dimension that could be sharded + sharding_choices = sharding_choices + [ + Shard(i) + for i, s in enumerate(arg.shape) + if s > 1 and s % mesh_size == 0 + ] + # TODO: add multi mesh choices + # all_choices = itertools.product( + # *(self.mesh.ndim * [sharding_choices]) + # ) + return sharding_choices + + def __iter__(self) -> "DTensorConverter": + return self + + def __next__(self) -> Tuple[Tuple[object, ...], Dict[str, object]]: + try: + next_sharding_choices = next(self.sharding_combs) + idx = 0 + + new_args: List[object] = [] + for arg in self.flatten_args: + if isinstance(arg, torch.Tensor): + new_args.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_args.append(arg) + + new_kwargs: List[object] = [] + for arg in self.flatten_kwargs: + if isinstance(arg, torch.Tensor): + new_kwargs.append( + self.to_dist_tensor( + arg, self.mesh, [next_sharding_choices[idx]] + ) + ) + idx += 1 + else: + new_kwargs.append(arg) + + return ( + tree_unflatten(new_args, self.flatten_args_spec), + tree_unflatten(new_kwargs, self.flatten_kwargs_spec), + ) + except StopIteration as e: + raise StopIteration from e + + def to_dist_tensor( + self, t: torch.Tensor, mesh: DeviceMesh, placements: List[Placement] + ) -> torch.Tensor: + if type(t) is torch.Tensor or type(t) is nn.Parameter: + if self.is_supported_tensor(t): + self.hit += 1 + if t.ndim == 0: + # scalar tensor by default will be replicated + r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim) + else: + # distribute non-scalar tensors + r = distribute_tensor(t, mesh, placements) + if type(t) is nn.Parameter: + r = nn.Parameter( # type: ignore[assignment] + r, requires_grad=r.requires_grad + ) + return r + else: + self.miss += 1 + return t + elif torch.overrides.is_tensor_like(t): + # Blindly converting tensor subclasses to dist tensor can cause + # unpredictable problems, we explicitly disable this conversion + # for now (i.e. we don't support DTensor holding tensor subclass + # until there's a strong reason later). + self.miss += 1 + return t + else: + raise RuntimeError(f"Trying to convert to DTensor, but got {type(t)}") diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b49bbc925583c001dbfaefb8ed328f73102da2fc --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py @@ -0,0 +1,51 @@ +# mypy: allow-untyped-defs + +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import os +import shutil +import tempfile +from functools import wraps +from typing import Any, Callable, Dict, Optional, Tuple + +import torch.distributed as dist + + +def with_temp_dir( + func: Optional[Callable] = None, +) -> Optional[Callable]: + """ + Wrapper to initialize temp directory for distributed checkpoint. + """ + assert func is not None + + @wraps(func) + def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None: + if dist.is_initialized(): + # Only create temp_dir when rank is 0 + if dist.get_rank() == 0: + temp_dir = tempfile.mkdtemp() + print(f"Using temp directory: {temp_dir}") + else: + temp_dir = "" + object_list = [temp_dir] + + # Broadcast temp_dir to all the other ranks + os.sync() + dist.broadcast_object_list(object_list) + self.temp_dir = object_list[0] + os.sync() + else: + temp_dir = tempfile.mkdtemp() + print(f"No process group initialized, using temp directory: {temp_dir}") + self.temp_dir = temp_dir + + try: + func(self, *args, **kwargs) + finally: + if dist.is_initialized() and dist.get_rank() == 0: + shutil.rmtree(self.temp_dir, ignore_errors=True) + else: + shutil.rmtree(self.temp_dir, ignore_errors=True) + + return wrapper diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..c2ab9af9f197e86bd182ee6ad1b6d02970e8f9a0 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/common_state_dict.py @@ -0,0 +1,122 @@ +# mypy: allow-untyped-defs + +# Owner(s): ["oncall: distributed"] + +import copy +from itertools import chain +from typing import Any, Dict + +import torch +import torch.nn as nn +from torch.distributed._sharded_tensor import ShardedTensor +from torch.distributed._state_dict_utils import _gather_state_dict +from torch.distributed._tensor import DTensor +from torch.distributed.checkpoint.state_dict import ( + _PG, + _STATE, + set_state_dict, + StateDictOptions, +) + + +class VerifyStateDictMixin: + def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False): + if isinstance(dist_tensor, (DTensor, ShardedTensor)): + dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey") + + if offload_to_cpu: + orig_tensor = orig_tensor.cpu() + dist_tensor = dist_tensor.cpu() + self.assertTrue(isinstance(dist_tensor, torch.Tensor)) + self.assertTrue(torch.allclose(orig_tensor, dist_tensor)) + + def _verify_msd( + self, + msd: Dict[str, Any], + dist_msd: Dict[str, Any], + options: StateDictOptions = StateDictOptions(), + offload_to_cpu=False, + ) -> None: + if not options.ignore_frozen_params: + self.assertEqual(len(msd), len(dist_msd)) + for fqn, param in msd.items(): + dist_param = dist_msd.get(fqn, None) + if not options.ignore_frozen_params: + self.assertIsNotNone(dist_param, f"{fqn=}") + try: + self._compare_tensor(param, dist_param, offload_to_cpu) + except AssertionError as e: + raise AssertionError( + f"{fqn} has mismatched value {param} {dist_param}" + ) from e + elif dist_param is None: + self.assertFalse(param.requires_grad, f"{fqn=}") + + def _verify_osd( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + osd: Dict[str, Any], + dist_osd: Dict[str, Any], + ) -> None: + params = list(chain.from_iterable(g["params"] for g in optim.param_groups)) + param_pid_mapping = dict(zip(params, range(len(params)))) + fqn_pid_mapping 
= {} + for fqn, param in model.named_parameters(): + pid = param_pid_mapping[param] + fqn_pid_mapping[fqn] = pid + fqn_pid_mapping[pid] = fqn + # Check optimizer_state_dict state + + self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE])) + for pid, states in osd[_STATE].items(): + fqn = fqn_pid_mapping[pid] + dist_states = dist_osd[_STATE].get(fqn, None) + self.assertIsNotNone(dist_states, fqn) + self.assertEqual(len(states), len(dist_states)) + for key, state in states.items(): + dist_state = states.get(key, None) + self.assertIsNotNone(dist_state) + self._compare_tensor(state, dist_state) + + # Check optimizer_state_dict param_group + old_dist_osd_pg = dist_osd[_PG] + if len(osd[_PG]) != len(dist_osd[_PG]): + self.assertTrue(len(dist_osd[_PG]) > len(osd[_PG])) + new_pg = copy.deepcopy(dist_osd[_PG][0]) + new_pg["params"] = [] + for dist_group in dist_osd[_PG]: + new_pg["params"].extend(dist_group["params"]) + dist_osd[_PG] = [new_pg] + + self.assertEqual(len(osd[_PG]), len(dist_osd[_PG])) + for group, dist_group in zip(osd[_PG], dist_osd[_PG]): + self.assertEqual(len(group), len(dist_group)) + for key, value in group.items(): + # Below doesn't work because param_groups can have None + # values. + # dist_value = dist_group.get(key, None) + # self.assertIsNotNone(dist_value, (dist_group, group)) + dist_value = dist_group[key] + if key == "params": + fqns = [fqn_pid_mapping[pid] for pid in value] + self.assertEqual(sorted(fqns), sorted(dist_value)) + else: + self.assertEqual(value, dist_value) + dist_osd[_PG] = old_dist_osd_pg + + def _verify_osd_by_load( + self, + model: nn.Module, + optim: torch.optim.Optimizer, + new_optim: torch.optim.Optimizer, + dist_osd: Dict[str, Any], + ) -> None: + new_dist_osd = _gather_state_dict(dist_osd) + set_state_dict( + model, + optimizers=new_optim, + model_state_dict={}, + optim_state_dict=new_dist_osd, + ) + self.assertEqual(optim.state_dict(), new_optim.state_dict()) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..00a67b20a73c7c8119a7ef08266b272da3fb890e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py @@ -0,0 +1,733 @@ +# mypy: allow-untyped-defs + +import contextlib +import enum +import logging +import os +import threading +from typing import NamedTuple + +import torch +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.nn as nn +from torch.distributed import rpc +from torch.distributed.nn import RemoteModule +from torch.nn.parallel import DistributedDataParallel +from torch.testing._internal.common_distributed import ( + requires_gloo, + requires_nccl, + skip_if_lt_x_gpu, + skip_if_rocm_multiprocess, +) +from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +NUM_EM_ROW = 2 +D_SPARSE = 3 +D_DENSE = 2 +D_HID = 3 +D_OUT = 1 +NUM_TRAINERS = 4 +# Trainers + the master + the remote worker +WORLD_SIZE = NUM_TRAINERS + 2 +TRAINER_RANKS = list(range(NUM_TRAINERS)) +REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1 +MASTER_RANK = REMOTE_WORKER_RANK + 1 + + +class DdpMode(enum.Enum): + # Don't apply DDP + NONE = enum.auto() + # Apply DDP to the top level 
nn.Module + OUTSIDE = enum.auto() + # Embed DDP inside the top level nn.Module + INSIDE = enum.auto() + + +def init_logger(): + logger = logging.getLogger(__name__) + level = logging.DEBUG if "debug" in os.environ else logging.INFO + logger.setLevel(level) + console = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + console.setFormatter(formatter) + console.setLevel(level) + # add the handlers to the logger + logger.addHandler(console) + logger.propagate = False + return logger + + +gLogger = init_logger() + + +class FeatureSet(NamedTuple): + """ A feature set has 2 types of features""" + + dense_features: torch.Tensor + sparse_features: torch.LongTensor + values: torch.Tensor + + +def _call_method(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def _remote_method(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +def _remote_method_async(method, rref, *args, **kwargs): + args_tup = tuple([method, rref] + list(args)) + return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) + + +class RemoteEM(nn.Module): + def __init__(self, num_embeddings: int, embedding_dim: int): + gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim) + super().__init__() + init_em = [0.5] * embedding_dim + self.em = nn.EmbeddingBag( + num_embeddings, + embedding_dim, + _weight=torch.tensor([init_em] * num_embeddings), + ) + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteEM.forward() on: %s", input) + return self.em(input, offsets=torch.LongTensor(range(input.shape[0]))) + + +# Return a linear module with predefined parameters. +def getLinear(d_in, d_out): + l = nn.Linear(d_in, d_out, bias=False) + w = torch.ones((d_out, d_in)) + w[0][0] = -1 + w.requires_grad_() + l.weight.data = w + return l + + +class RemoteNet(nn.Module): + def __init__(self, d_in: int, d_out: int): + gLogger.info("Initing RemoteNet with %s %s", d_in, d_out) + super().__init__() + self.fc = getLinear(d_in, d_out) + self.relu = nn.ReLU() + + def forward(self, input: torch.Tensor): + gLogger.debug("Running RemoteNet.forward() on: %s", input) + return self.relu(self.fc(input)) + + +class HybridModel(nn.Module): + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + process_group_for_ddp: dist.ProcessGroup = None, + ): + super().__init__() + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.fc1 = getLinear(D_DENSE, D_DENSE) + self.fc2 = getLinear(D_HID, D_OUT) + + self.non_ddp_params = tuple(self.fc1.parameters()) + tuple( + self.fc2.parameters() + ) + self.ddp_params = () + + if process_group_for_ddp is not None: + self.non_ddp_params, self.ddp_params = ( + tuple(self.fc1.parameters()), + tuple(self.fc2.parameters()), + ) + gLogger.info("Use DDP for the second local net.") + self.fc2 = DistributedDataParallel( + self.fc2, check_reduction=True, process_group=process_group_for_ddp + ) + + gLogger.info( + "HybridModel has %s groups of parameters.", len(list(self.parameters())) + ) + + def forward(self, input: FeatureSet): + gLogger.debug("Running HybridModel.forward on %s", input) + sparse = _remote_method( + RemoteEM.forward, self.remote_em_rref, input.sparse_features + ) + # The same size of mini batch. 
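+ # RemoteEM's EmbeddingBag is invoked with one offset per input row, so the pooled
+ # sparse output has exactly one row per example in the mini batch and its leading
+ # dimension must match the dense features, as checked just below.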
+ assert sparse.shape[0] == input.dense_features.shape[0] + dense = self.fc1(input.dense_features) + x = torch.cat((dense, sparse), 1) + gLogger.debug("Concatenated feature: %s", x) + x = _remote_method(RemoteNet.forward, self.remote_net_rref, x) + return self.fc2(x) + + +class Trainer: + def __init__( + self, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ddp_mode: DdpMode, + rank: int, + ): + self.rank = rank + self.trainer_group = ( + dist.new_group(TRAINER_RANKS) + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE) + else None + ) + self.remote_em_rref = remote_em_rref + self.remote_net_rref = remote_net_rref + self.hybrid_module = HybridModel( + self.remote_em_rref, + self.remote_net_rref, + self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None, + ) + self.ddp_params, self.non_ddp_params = ( + self.hybrid_module.ddp_params, + self.hybrid_module.non_ddp_params, + ) + if ddp_mode == DdpMode.OUTSIDE: + gLogger.info("Wrapping the whole hybrid module into DDP.") + self.ddp_params += self.non_ddp_params + self.non_ddp_params = () + self.hybrid_module = DistributedDataParallel( + self.hybrid_module, + check_reduction=True, + process_group=self.trainer_group, + ) + gLogger.info( + "Succeeded in creating a HybridModel instance with " + "%s ddp params and %s other local params.", + len(self.ddp_params), len(self.non_ddp_params) + ) + + def destroy_pg(self): + if self.trainer_group: + dist.destroy_process_group(self.trainer_group) + + def train_batch( + self, + mini_batch: FeatureSet, + trainer_has_less_inputs: bool, + simulate_uneven_inputs: bool, + ): + grads_dict = None + + if not simulate_uneven_inputs: + input_batches = [mini_batch] + else: + # Split into microbatches, and trim to simulate uneven inputs. + dense_features = mini_batch.dense_features + sparse_features = mini_batch.sparse_features + values = mini_batch.values + + dense_microbatch = torch.split(dense_features, 2) + sparse_microbatch = torch.split(sparse_features, 2) + values_microbatch = torch.split(values, 2) + batches = [] + for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch): + feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v) + batches.append(feature_set) + + if trainer_has_less_inputs: + input_batches = batches[: len(batches) // 2] + gLogger.info( + "Trainer reduced input patches from %s " + "to %s to simulate uneven inputs.", + len(batches), len(input_batches) + ) + else: + input_batches = batches + + with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext(): + for b in input_batches: + with dist_autograd.context() as context_id: + output = self.hybrid_module.forward(b) + loss = (output * mini_batch.values).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info( + "Loss is %s for mini batch: %s. " + "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict + ) + return ( + tuple(grads_dict[param] for param in self.ddp_params), + tuple(grads_dict[param] for param in self.non_ddp_params), + ) + + +def get_training_examples(): + n = 16 + training_examples = FeatureSet( + dense_features=torch.zeros((n, D_DENSE)), + sparse_features=torch.zeros(n, dtype=torch.long), + values=torch.zeros(n), + ) + idx = 0 + # Every example has another one that has exactly the same features but an + # opposite value. Therefore, their grads cancel each other in all-reduce. 
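+ # Concretely: the loss is (output * value).sum() and each paired example shares
+ # identical features, so one contributes value * d(output)/d(theta) and the other
+ # contributes -value * d(output)/d(theta); summed by the all-reduce they vanish.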
+ for value in (-1, 1): + for x in (-1.0 * value, 1.0 * value): + for y in (1.0 * value, -1.0 * value): + for z in (0, 1): + training_examples.dense_features[idx, :] = torch.tensor((x, y)) + training_examples.sparse_features[idx] = z + training_examples.values[idx] = value + idx += 1 + + # Split the examples among NUM_TRAINERS trainers + assert 0 == (n % NUM_TRAINERS) + examples_per_trainer = int(n / NUM_TRAINERS) + return [ + FeatureSet( + dense_features=training_examples.dense_features[ + start : start + examples_per_trainer, : + ], + sparse_features=training_examples.sparse_features[ + start : start + examples_per_trainer + ], + values=training_examples.values[start : start + examples_per_trainer], + ) + for start in range(0, n, examples_per_trainer) + ] + + +shutdown_signal = threading.Condition() + + +def set_shutdown_signal(): + global shutdown_signal + with shutdown_signal: + shutdown_signal.notify() + + +class DdpUnderDistAutogradTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return WORLD_SIZE + + def remote_worker_name(self) -> str: + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{REMOTE_WORKER_RANK}" + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + def _remote_worker_process(self, ddp_mode): + gLogger.info("The remote worker is running.") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting remote worker.") + dist.destroy_process_group() + + def _trainer_process(self, rank: int): + gLogger.info("Running the trainer #%s...", rank) + gLogger.info( + "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS + ) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + gLogger.info("Waiting for shutdown signal on trainer #%s...", rank) + + global shutdown_signal + with shutdown_signal: + shutdown_signal.wait() + gLogger.info("Exiting the trainer #%s...", rank) + dist.destroy_process_group() + + def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool): + gLogger.info("Running the master process...") + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_em_rref = rpc.remote( + self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE) + ) + remote_net_rref = rpc.remote( + self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID) + ) + gLogger.info("Created remote rrefs on master") + self.do_test_on_master( + ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref + ) + + def do_test_on_master( + self, + ddp_mode: DdpMode, + simulate_uneven_inputs: bool, + remote_em_rref: rpc.RRef, + remote_net_rref: rpc.RRef, + ): + if simulate_uneven_inputs: + gLogger.info( + "Running DDP + RPC test with simulating uneven inputs across trainers." 
+ ) + + trainer_rrefs = [] + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + trainer_rrefs.append( + rpc.remote( + trainer, + Trainer, + args=(remote_em_rref, remote_net_rref, ddp_mode, rank), + ) + ) + + if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): + # new_group needs to be called on ranks. + dist.new_group(TRAINER_RANKS) + + training_examples = get_training_examples() + for _ in range(3): + futures = [] + num_trainers = len(trainer_rrefs) + for idx, trainer_rref in enumerate(trainer_rrefs): + # Half the trainers will deplete inputs earlier than the rest. + trainer_has_less_inputs = ( + simulate_uneven_inputs and idx < num_trainers // 2 + ) + futures.append( + _remote_method_async( + Trainer.train_batch, + trainer_rref, + training_examples[idx], + trainer_has_less_inputs, + simulate_uneven_inputs, + ) + ) + + for future in futures: + ddp_grads, non_ddp_grads = future.wait() + # When there are uneven inputs, it is not necessary that grads + # cancel each other out, since some trainers contribute 0 grad. + if not simulate_uneven_inputs: + for grad in ddp_grads: + self.assertEqual( + grad, + torch.zeros_like(grad), + msg=f"The grad for any ddp parameter should be zeros, because " + "the training examples' grads cancel each other. Received " + f"gradient {grad}", + ) + for grad in non_ddp_grads: + self.assertNotEqual( + grad, + torch.zeros_like(grad), + msg="The grad for any non-ddp parameter shouldn't be zeros", + ) + + # Destroy process groups + for idx, trainer_rref in enumerate(trainer_rrefs): + _remote_method_async(Trainer.destroy_pg, trainer_rref).wait() + + # Send shutdown signals. + for rank in TRAINER_RANKS: + trainer = self.trainer_name(rank) + rpc.rpc_sync(trainer, set_shutdown_signal, args=()) + + rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=()) + + def _do_test(self, ddp_mode, simulate_uneven_inputs=False): + if self.rank == MASTER_RANK: + self._master_process(ddp_mode, simulate_uneven_inputs) + elif self.rank == REMOTE_WORKER_RANK: + self._remote_worker_process(ddp_mode) + elif self.rank in TRAINER_RANKS: + self._trainer_process(self.rank) + else: + raise RuntimeError(f"Unknown process rank: {self.rank}") + + @requires_gloo() + @dist_init + def test_backward_no_ddp(self): + self._do_test(DdpMode.NONE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside(self): + self._do_test(DdpMode.OUTSIDE) + + @requires_gloo() + @dist_init + def test_backward_ddp_outside_uneven_inputs(self): + self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_backward_ddp_inside(self): + self._do_test(DdpMode.INSIDE) + + +# Common utils for both CPU and CUDA test suites +class CommonDdpComparisonTest(RpcAgentTestFixture): + @property + def world_size(self) -> int: + return NUM_TRAINERS + + def trainer_name(self, rank): + # The name has to be consistent with that in 'dist_init' decorator. + return f"worker{rank}" + + @staticmethod + def get_remote_grads(rref, context_id): + return dist_autograd.get_gradients(context_id)[rref.local_value().weight] + + +class DdpComparisonTest(CommonDdpComparisonTest): + def _run_test_ddp_comparision(self, simulate_uneven_inputs=False): + gLogger.info("Running trainer rank: %s", self.rank) + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
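+ # The comparison below reduces to checking that these two ways of obtaining
+ # gradients agree once DDP has all-reduced them (illustrative sketch only,
+ # using the same names as this test):
+ #
+ #     with dist_autograd.context() as ctx_id:
+ #         dist_autograd.backward(ctx_id, [ddp_net(inputs).norm()])
+ #         grads = dist_autograd.get_gradients(ctx_id)  # dict: param -> grad
+ #
+ #     ddp_net.zero_grad()
+ #     ddp_net(inputs).norm().backward()                # grads land in param.grad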
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + # Postfix file_name with "pg" since file_name is also used by RPC agent + init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"), + world_size=self.world_size, + rank=self.rank, + ) + net = nn.Linear(2, 3) + ddp_net = DistributedDataParallel(net) + + # Odd ranks join early if simulate_uneven_inputs. + num_inputs = 1 + if simulate_uneven_inputs: + if self.rank % 2 == 0: + num_inputs += 2 + inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)] + + if simulate_uneven_inputs: + gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list)) + + # Use distributed autograd. The gradients will be in RPC context map. + grads_dict = {} + with ddp_net.join(simulate_uneven_inputs): + for i, inputs in enumerate(inputs_list): + with dist_autograd.context() as context_id: + loss = ddp_net(inputs).norm() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict) + + # Use local autograd. The gradients will be in each variable's '.grad'. + ddp_net.zero_grad() + loss = ddp_net(inputs).norm() + loss.backward() + + # The gradients should be the same + for param in net.parameters(): + self.assertTrue( + param in grads_dict, + msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}", + ) + self.assertEqual( + grads_dict[param], + param.grad, + msg=f"The grads for param {param} are different under local " + f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}", + ) + dist.destroy_process_group() + + @requires_gloo() + @dist_init + def test_ddp_comparison(self): + self._run_test_ddp_comparision() + + @requires_gloo() + @dist_init + def test_ddp_comparison_uneven_inputs(self): + # test with simulating uneven inputs in DDP + self._run_test_ddp_comparision(simulate_uneven_inputs=True) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_sparse_grads(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + model = nn.EmbeddingBag(10, 3, sparse=True) + ddp_model = DistributedDataParallel(model) + + # Different inputs for each + input = torch.LongTensor(10).random_(0, 10) + offsets = torch.LongTensor([0, 4]) + + # Run local. + loss = ddp_model(input, offsets).sum() + loss.backward() + + with dist_autograd.context() as context_id: + loss = ddp_model(input, offsets).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads_dict)) + self.assertEqual(model.weight.grad, grads_dict[model.weight]) + + @requires_gloo() + @dist_init + def test_ddp_dist_autograd_local_vs_remote(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. 
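+ # RemoteModule constructs the nn.Linear on the owning worker and only an RRef is
+ # held locally, which is why the local replica below seeds its weight from
+ # module_rref.to_here(), so the local and remote runs start from identical parameters.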
+ torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + # Use two different remote device input string, w/ and w/o the default + # device string "cpu", respectively. + for remote_device in ["worker0/cpu", "worker0"]: + remote_layer1 = RemoteModule( + remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False) + ) + layer1 = nn.Linear(10, 5, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + # Run local case. + layer2 = nn.Linear(5, 1) + inputs = torch.rand((10, 10)) + ddp_model = DistributedDataParallel(layer2) + loss = ddp_model(layer1(inputs)).sum() + loss.backward() + + # Run remote case. + with dist_autograd.context() as context_id: + loss = ddp_model(remote_layer1(inputs)).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + + +class CudaDdpComparisonTest(CommonDdpComparisonTest): + @skip_if_lt_x_gpu(NUM_TRAINERS) + @requires_nccl() + @dist_init + @skip_if_rocm_multiprocess + def test_ddp_dist_autograd_local_vs_remote_gpu(self): + # Each trainer uses a different random seed. Otherwise, they are going + # to have exactly the same initial model parameters, input, and + # therefore grads. That means the grads will be the same before and + # after DDP's all-reduce. + torch.manual_seed(self.rank) + dist.init_process_group( + backend="gloo", + init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), + world_size=self.world_size, + rank=self.rank, + ) + + remote_layer1 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False) + ) + layer1 = nn.Linear(10, 7, False) + # Start with the same parameters for remote and local + layer1.weight = remote_layer1.module_rref.to_here().weight + + layer2 = nn.Linear(7, 5).cuda(self.rank) + ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank]) + + remote_layer3 = RemoteModule( + remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False) + ) + layer3 = nn.Linear(5, 3, False) + # Start with the same parameters for remote and local + layer3.weight = remote_layer3.module_rref.to_here().weight + + layer4 = nn.Linear(3, 1).cuda(self.rank) + ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank]) + + # Run local case. + inputs = torch.rand((10, 10)) + loss = ddp_layer4( + layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank) + ).sum() + loss.backward() + + # Run remote case. 
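+ # Under dist_autograd the gradients of the RemoteModule layers are recorded in the
+ # autograd context on their owner ("worker0"), so they are fetched below via
+ # rpc_sync(..., get_remote_grads, ...) rather than read from local .grad fields.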
+ with dist_autograd.context() as context_id: + loss = ddp_layer4( + remote_layer3( + ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu() + ).cuda(self.rank) + ).sum() + dist_autograd.backward(context_id, [loss]) + grads_dict = dist_autograd.get_gradients(context_id) + dist.barrier() + self.assertEqual( + layer1.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer1.module_rref, context_id), + ), + ) + self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) + self.assertEqual( + layer3.weight.grad, + rpc.rpc_sync( + "worker0", + CommonDdpComparisonTest.get_remote_grads, + args=(remote_layer3.module_rref, context_id), + ), + ) + self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight]) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py new file mode 100644 index 0000000000000000000000000000000000000000..981c8e59580649a838c924b87d7e1478ef00de91 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py @@ -0,0 +1,10423 @@ +# mypy: allow-untyped-defs + +import copy +import json +import itertools +import math +import os +import random +import sys +import tempfile +import time +from collections import namedtuple, OrderedDict, defaultdict +from contextlib import contextmanager, nullcontext +from dataclasses import dataclass +from datetime import timedelta +from functools import reduce +from typing import Union, NamedTuple, Callable, Any +import unittest +import numpy as np +import torch +import torch.cuda +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.averagers as averagers +import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD +import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils +import torch.nn as nn +import torch.nn.functional as F +from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR +from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT +from torch.utils._python_dispatch import TorchDispatchMode +from torch.autograd import DeviceType +from torch.cuda.amp import GradScaler, autocast + +from torch.distributed.algorithms.ddp_comm_hooks import ( + post_localSGD_hook as post_localSGD, + powerSGD_hook as powerSGD, + default_hooks as default, + quantization as quantization_hooks, +) +from torch.distributed.optim import _apply_optimizer_in_backward + +from torch.distributed.distributed_c10d import ( + get_world_size, + _get_default_group, + _get_pg_config, +) +from torch.distributed.utils import ( + _verify_param_shape_across_processes, + _sync_module_states, +) +from torch.profiler import ( + ExecutionTraceObserver, + ProfilerActivity, +) + +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars, _MixedPrecision +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + TEST_SKIPS, + init_multigpu_helper, + initialize_temp_directories, + cleanup_temp_dir, + simple_sparse_reduce_tests, + skip_if_rocm_multiprocess, + skip_if_small_worldsize, + skip_if_odd_worldsize, + skip_if_lt_x_gpu, + nccl_skip_if_lt_x_gpu, + skip_if_no_gpu, + require_n_gpus_for_nccl_backend, + requires_nccl_version, + captured_output, + with_nccl_blocking_wait, + with_dist_debug_levels, + verify_ddp_error_logged, 
+ DistTestCases, +) +from torch.testing._internal.common_utils import ( + instantiate_parametrized_tests, + IS_MACOS, + IS_WINDOWS, + FILE_SCHEMA, + IS_FBCODE, + NO_MULTIPROCESSING_SPAWN, + IS_SANDCASTLE, + skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, +) + +import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer + +from torch.utils.data.distributed import DistributedSampler +import operator + +try: + import torchvision + + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False + +if sys.platform == "win32": + import msvcrt +else: + import fcntl + + +class NetWithBuffers(nn.Module): + def __init__(self) -> None: + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + self.buffer.add_(1) + return self.b(self.a(x)) + + +class Foo: + def __init__(self, x): + # Can be tensor or int + self.x = x + + def __eq__(self, other): + def eq(value, other): + if isinstance(value, torch.Tensor): + return torch.equal(value, other) + return value == other + + for attr, value in self.__dict__.items(): + other_value = other.__dict__[attr] + if not eq(value, other_value): + return False + return True + + +f = Foo(10) +f.bar = 1 + +foo_cpu_tensor = Foo(torch.randn(3, 3)) + + +COLLECTIVES_OBJECT_TEST_LIST = [ + {"key1": 3, "key2": 4, "key3": {"nested": True}}, + f, + foo_cpu_tensor, + "foo", + [1, 2, True, "string", [4, 5, "nested"]], +] + +# Allowlist of distributed backends where profiling collectives is supported. +PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.NCCL, + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported with use_cuda=True +CUDA_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.GLOO, + dist.Backend.MPI, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Allowlist of distributed backends where profiling is supported for p2p ops +SEND_RECV_PROFILING_SUPPORTED_BACKENDS = [ + dist.Backend.MPI, + dist.Backend.GLOO, + dist.Backend.NCCL, + dist.Backend.UCC, +] + +# Dummy NamedTuple data structures to test DDP support for NamedTuple types. 
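+# Both a collections.namedtuple and a typing.NamedTuple variant are defined so the
+# tests cover either flavor when DDP flattens and rebuilds structured inputs.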
+EXPECTED_FIELDS = ("a", "b") +TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS) + + +class TestNamedTupleInput_1(NamedTuple): + a: torch.tensor + b: torch.tensor + + +skipIfNoTorchVision = skip_but_pass_in_sandcastle_if( + not HAS_TORCHVISION, "no torchvision" +) + +BACKEND = os.environ["BACKEND"] +INIT_METHOD = os.getenv("INIT_METHOD", "env://") + +DEFAULT_TIMEOUT = 300 +CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500} + + +def get_profiling_event(event_name, profiler, dedup_gpu_user_annotation=False): + event_list = ( + profiler.events() + if isinstance(profiler, torch.profiler.profile) + else profiler.function_events + ) + return [ + event for event in event_list + if ( + (event.name.endswith(event_name) or event.name.startswith(event_name)) + and (not dedup_gpu_user_annotation or event.device_type != DeviceType.CUDA) + ) + ] + +def get_profiler_nccl_meta(prof): + """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" + We will need to test metadata obtained from profiler here""" + tf = tempfile.NamedTemporaryFile( + mode="w+t", suffix=".json", delete=False + ) + tf.close() + trace_file = tf.name + + prof.export_chrome_trace(trace_file) + with open(trace_file) as f: + events = json.load(f)["traceEvents"] + print(f"Trace saved to {trace_file}") + + # Comment to debug + os.remove(trace_file) + + return [e for e in events if e.get("name") == "record_param_comms"] + +# Base error message substring on unfinished reductions. +ddp_prev_reduction_unfinished_str = ( + "Expected to have finished reduction in the prior iteration" +) +# Error message substring when find_unused_parameters=True has not been passed +ddp_recommend_find_unused_params_str = ( + "passing the keyword argument `find_unused_parameters=True`" +) +# Error message substring when find_unused_parameters=True is enabled +ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled" +# Error message substring for possibility of not all model outputs being used +# in loss computation +ddp_outputs_not_used_in_loss_str = ( + "`forward` function outputs participate in calculating loss" +) +# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG +ddp_suggest_debug_mode_str = ( + "set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL" +) + + +class DDPUnevenTestInput(NamedTuple): + name: str + model: nn.Module + inp: Union[torch.tensor, tuple] + sync_interval: int + throw_on_early_termination: bool = False + hook: Callable = None + state: Any = None + + +class _FC2(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc = nn.Linear(10, 50, bias=True) + self.fc.bias.requires_grad = False + + def forward(self, x): + x = self.fc(x) + return x + + +class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(2, 10, bias=False) + self.fc2 = _FC2() + self.fc3 = nn.Linear(50, 4, bias=False) + self.relu = nn.ReLU() + self.no_grad_param = nn.Parameter( + torch.tensor([2, 2]).long(), requires_grad=False + ) + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return F.softmax(x, dim=1) + + +class LargeNet(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(1000, 2000, bias=False) + self.fc2 = nn.Linear(2000, 500, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + +class Task(nn.Module): + def __init__(self) -> None: + super().__init__() + self.p = 
nn.Parameter(torch.ones(2, 2)) + + def forward(self, x): + return self.p + x + + +class BatchNormNet(nn.Module): + def __init__(self, affine=True): + super().__init__() + self.fc1 = nn.Linear(2, 40, bias=False) + self.bn = nn.BatchNorm1d(4, affine=affine) + self.fc2 = nn.Linear(40, 4, bias=False) + + def forward(self, x): + x = torch.reshape(self.fc1(x), (-1, 4, 10)) + x = self.bn(x) + x = torch.reshape(x, (-1, 40)) + x = self.fc2(x) + return F.softmax(x, dim=1) + + +class UnusedParamTwoLinLayerNet(nn.Module): + def __init__(self) -> None: + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 10, bias=False) + self.c = nn.Linear(5, 5, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class DictOutputModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.module = UnusedParamTwoLinLayerNet() + + def forward(self, x): + predictions = self.module(x) + loss = (predictions[0] + predictions[1]).sum() + return { + "predictions": predictions, + "loss": loss, + } + + +class TwoLinLayerNet(nn.Module): + def __init__(self) -> None: + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + + def forward(self, x): + a = self.a(x) + b = self.b(x) + return (a, b) + + +class EmbeddingNetDifferentParams(nn.Module): + """ + A module containing an embedding with different dimension or different # of + parameters depending on the rank. + """ + + def __init__(self, rank, diff_num_params=False): + super().__init__() + embedding_dim = 500 if diff_num_params or rank == 0 else 50 + self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim) + self.lin = nn.Linear(embedding_dim, 1) + if diff_num_params: + self.lin2 = nn.Linear(1, 1, bias=False) + + def forward(self, x): + x = self.embedding(x) + return self.lin(x) + + +class ControlFlowToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + # Second layer is used dependent on input x. + use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device)) + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + +DDP_NET = Net() +BN_NET = BatchNormNet() +BN_NET_NO_AFFINE = BatchNormNet(affine=False) +ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99) + + +def get_timeout(test_id): + test_name = test_id.split(".")[-1] + if test_name in CUSTOMIZED_TIMEOUT: + return CUSTOMIZED_TIMEOUT[test_name] + else: + return DEFAULT_TIMEOUT + + +default_pg_timeout = 60 + +CUSTOM_PG_TIMEOUT = { + # This test runs slowly and needs additional time to complete, otherwise can + # be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING + "test_ddp_uneven_inputs": 300, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_model_diff_across_ranks": 5, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. 
+ "test_ddp_has_finalized": 5, +} + +def require_backend_is_available(backends): + def check(backend): + if backend == dist.Backend.GLOO: + return dist.is_gloo_available() + if backend == dist.Backend.NCCL: + return dist.is_nccl_available() + if backend == dist.Backend.MPI: + return dist.is_mpi_available() + if backend == dist.Backend.UCC: + return dist.is_ucc_available() + if backend in DistTestCases.backend_feature["plugin"]: + return True + return False + + if BACKEND not in backends: + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be one of {backends}" + ) + + if not check(dist.Backend(BACKEND)): + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be available" + ) + return lambda func: func + + +def require_world_size(world_size): + if int(os.environ["WORLD_SIZE"]) < world_size: + return skip_but_pass_in_sandcastle( + "Test requires world size of %d" % world_size + ) + return lambda func: func + + +@contextmanager +def _lock(): + TEMP_DIR = os.environ["TEMP_DIR"] + lockfile = os.path.join(TEMP_DIR, "lockfile") + with open(lockfile, "w") as lf: + try: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1) + yield + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_EX) + yield + finally: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_UN) + lf.close() + + +@contextmanager +def _rank_temp_file(): + if dist.get_rank() == 0: + fd, name = tempfile.mkstemp() + os.close(fd) + else: + name = None + object_list = [name] + dist.broadcast_object_list(object_list) + name = object_list[0] + try: + yield name + finally: + if dist.get_rank() == 0: + os.remove(name) + + +def _build_tensor(size, value=None, dtype=torch.float, device_id=None): + if value is None: + value = size + if device_id is None: + return torch.empty(size, size, size, dtype=dtype).fill_(value) + else: + return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id) + + +def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float): + if value is None: + value = dim + return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value) + + +def _create_autograd_profiler(): + return torch.autograd.profiler.profile(record_shapes=True) + + +def _create_torch_profiler(): + return torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + ], + record_shapes=True, + ) + + +class Barrier: + barrier_id = 0 + + @classmethod + def init(cls): + cls.barrier_id = 0 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + for f_name in os.listdir(barrier_dir): + os.unlink(os.path.join(barrier_dir, f_name)) + + @classmethod + def sync(cls, wait_for=None, timeout=10): + if wait_for is None: + wait_for = dist.get_world_size() + cls.barrier_id += 1 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + pid = str(os.getpid()) + barrier_file = os.path.join(barrier_dir, pid) + with _lock(): + with open(barrier_file, "w") as f: + f.write(str(cls.barrier_id)) + + start_time = time.time() + while True: + arrived = 0 + with _lock(): + for f_name in os.listdir(barrier_dir): + with open(os.path.join(barrier_dir, f_name)) as f: + data = f.read() + if int(data) >= cls.barrier_id: + arrived += 1 + if arrived == wait_for: + break + + if time.time() - start_time > timeout: + raise RuntimeError("barrier timeout") + time.sleep(0.1) + + +class TestDistBackend(MultiProcessTestCase): + @classmethod + def setUpClass(cls): + 
os.environ["MASTER_ADDR"] = str(MASTER_ADDR) + # Not setting MASTER_PORT and get a random free port + super().setUpClass() + + def setUp(self): + super().setUp() + # initialize temp directories + initialize_temp_directories() + # initialize Barrier + Barrier.init() + # Skip return code checking for following tests as they are expected to + # crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING. + self.skip_return_code_checks = [self.test_ddp_has_finalized.__wrapped__] + + def tearDown(self): + cleanup_temp_dir() + super().tearDown() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + @classmethod + def _run(cls, rank, test_name, file_name, pipe, **kwargs): + if BACKEND == "nccl" and not torch.cuda.is_available(): + sys.exit(TEST_SKIPS["no_cuda"].exit_code) + self = cls(test_name) + self.rank = rank + self.file_name = file_name + + if torch.cuda.is_available() and torch.cuda.device_count() < int( + self.world_size + ): + sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) + try: + pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout) + timeout = timedelta(seconds=pg_timeout_seconds) + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=int(self.world_size), + rank=self.rank, + timeout=timeout, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. + self._barrier() + + self.run_test(test_name, pipe) + self._barrier() + dist.destroy_process_group() + sys.exit(0) + + # Needed since MultiProcessTestCase assumes a world_size of 4, but we + # run these tests under other various world_sizes. + @property + def world_size(self): + return os.environ["WORLD_SIZE"] + + +class DistributedTest: + class _DistTestBase: + def _barrier(self, *args, **kwargs): + Barrier.sync(*args, **kwargs) + + def _init_group_test(self, **kwargs): + group = [1, 2] + group_id = dist.new_group(group, **kwargs) + rank = dist.get_rank() + if rank not in group: + return ([], None, rank) + + return (group, group_id, rank) + + def _init_full_group_test(self, **kwargs): + group = list(range(0, dist.get_world_size())) + group_id = dist.new_group(**kwargs) + rank = dist.get_rank() + return (group, group_id, rank) + + def _init_global_test(self): + group = list(range(0, dist.get_world_size())) + group_id = dist.group.WORLD + rank = dist.get_rank() + return (group, group_id, rank) + + def _verify_buffers_equal(self, m1, m2): + # verify buffers across models + m1_buf_dict = dict(m1.module.named_buffers()) + for name, buf in m2.module.named_buffers(): + self.assertEqual(buf, m1_buf_dict[name]) + + # Verify buffers across ranks. 
+ m1_buffers = list(m1.buffers()) + m2_buffers = list(m2.buffers()) + for (buf1, buf2) in zip(m1_buffers, m2_buffers): + gathered_bufs = [ + torch.empty_like(buf1) for _ in range(dist.get_world_size()) + ] + dist.all_gather(gathered_bufs, buf1) + gathered_bufs_m2 = [ + torch.empty_like(buf2) for _ in range(dist.get_world_size()) + ] + for b in gathered_bufs: + self.assertEqual(b, buf1) + dist.all_gather(gathered_bufs_m2, buf2) + for b in gathered_bufs_m2: + self.assertEqual(b, buf2) + + def _sanity_check_profiler_nccl_meta(self, nccl_meta_events): + """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" + We test for basic fields in this profiler event that correspond to the nccl communication + collectives""" + per_coll_meta = defaultdict(list) + for e in nccl_meta_events: + args = e.get("args", {}) + collname = args.get("Collective name", "") + self.assertNotEqual(collname, "") + self.assertNotEqual(args.get("dtype", ""), "") + + per_coll_meta[collname].append(args) + if collname in {"wait"}: + continue + + self.assertEqual(args["Process Group Description"], "default_pg") + self.assertNotEqual(args["Process Group Ranks"], "") + + self.assertGreaterEqual(args.get("In msg nelems", -1), 0) + self.assertGreaterEqual(args.get("Out msg nelems", -1), 0) + self.assertGreaterEqual(args.get("Group size", -1), 0) + self.assertGreaterEqual(args.get("Global rank start", -1), 0) + self.assertGreaterEqual(args.get("Global rank stride", -1), 0) + + # print(per_coll_meta) + return per_coll_meta + + def test_dump_DDP_relevant_env_vars(self): + with captured_output() as (out, _): + _dump_DDP_relevant_env_vars() + lines = out.getvalue().splitlines() + + def format_line(var): + return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}" + + # Check relevant env vars + vars = [ + "MASTER_ADDR", + "MASTER_PORT", + "WORLD_SIZE", + "NCCL_TOPO_DUMP_FILE", # N/A + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + for var in vars: + line = format_line(var) + self.assertIn(line, lines) + # Check irrelevant env vars + vars = [ + "xxx", + "yyy", + "zzz", + ] + for var in vars: + line = format_line(var) + self.assertNotIn(line, lines) + + # GET RANK + def test_get_rank(self): + test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir") + pid = str(os.getpid()) + num_processes = dist.get_world_size() + with open(os.path.join(test_dir, pid), "w") as f: + f.write(str(dist.get_rank())) + + self._barrier() + + all_ranks = set() + for f_name in os.listdir(test_dir): + with open(os.path.join(test_dir, f_name)) as f: + all_ranks.add(int(f.read())) + self.assertEqual(len(all_ranks), num_processes) + + self._barrier() + + if dist.get_rank() == 0: + for f_name in os.listdir(test_dir): + os.unlink(os.path.join(test_dir, f_name)) + + self._barrier() + + def test_get_backend(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + backend_str = BACKEND.lower() + self.assertEqual(dist.get_backend(), backend_str) + if dist.get_rank() in group: + self.assertEqual(dist.get_backend(group_id), backend_str) + else: + with self.assertRaisesRegex( + ValueError, "Invalid process group specified" + ): + dist.get_backend(group_id) + + def test_Backend_enum_class(self): + # test parsing + backend = BACKEND.lower() + self.assertEqual(dist.Backend(BACKEND.upper()), backend) + self.assertEqual(dist.Backend(BACKEND), backend) + with self.assertRaises(ValueError): + dist.Backend(None) + with self.assertRaises(ValueError): + dist.Backend(3) + with 
self.assertRaises(ValueError): + dist.Backend(["gloo"]) + + # Test destroy + def test_destroy_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of group + def test_get_rank_size_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + if dist.get_rank() in group: + self.assertEqual(dist.get_world_size(group_id), 2) + self.assertTrue(dist.get_rank(group_id) in list(range(2))) + else: + self.assertEqual(dist.get_world_size(group_id), -1) + self.assertEqual(dist.get_rank(group_id), -1) + + # Test destroy full groups + def test_destroy_full_group(self): + _, group_id, _ = self._init_full_group_test() + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of full group + def test_get_rank_size_full_group(self): + _, group_id, _ = self._init_full_group_test() + self.assertEqual(dist.get_world_size(group_id), dist.get_world_size()) + self.assertEqual(dist.get_rank(group_id), dist.get_rank()) + + def _test_barrier_timeout(self, group_id, timeout): + local_rank = dist.get_rank(group_id) + + # Only execute barrier on rank == 0, causing it to timeout + if local_rank == 0: + expected_time = time.time() + timeout.total_seconds() + # In debug mode, we execute a monitored_barrier before the + # collective, so assert on that. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + exception_ctx = self.assertRaisesRegex( + Exception, "failed to pass monitoredBarrier" + ) + else: + exception_ctx = self.assertRaisesRegex( + Exception, " (Timed out|closed|timeout) " + ) + with exception_ctx: + dist.barrier(group_id) + self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1) + else: + pass + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + @skip_but_pass_in_sandcastle_if( + not INIT_METHOD.startswith("file://"), + "Requires file:// initialization method. " + + "Both tcp:// and env:// rely on the TCP store for which " + "reinitialization has proven racy.", + ) + def test_barrier_timeout_global(self): + dist.destroy_process_group() + + # Explicitly pass world size to the barrier because we've + # just destroyed any state in torch.distributed. + self._barrier(wait_for=int(os.environ["WORLD_SIZE"])) + + # Reinitialize global process group + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + self._test_barrier_timeout(dist.group.WORLD, timeout) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_group(self): + timeout = timedelta(seconds=5) + _, group_id, _ = self._init_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_full_group(self): + timeout = timedelta(seconds=1) + _, group_id, _ = self._init_full_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + # This test helper can only be used when using the Gloo or NCCL backend + # **and** both the Gloo and NCCL backends are available. + # See the @skip annotations below. 
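+ # What the helper exercises, in a nutshell (illustrative sketch, a 2-rank setup
+ # is assumed): a subgroup may run on a different backend than the default one,
+ #
+ #     pg = dist.new_group(ranks=[0, 1], backend="nccl")
+ #     pg._get_backend_name()   # -> "nccl", even if init_process_group used gloo
+ #
+ # and collectives such as broadcast then go through that subgroup's backend.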
+ def _test_group_override_backend(self, initializer): + if BACKEND == "gloo": + new_backend = "nccl" + elif BACKEND == "nccl": + new_backend = "gloo" + elif BACKEND in DistTestCases.backend_feature["plugin"]: + new_backend = "gloo" + + group, group_id, rank = initializer(backend=new_backend) + if group_id is None: + return + + if new_backend == "gloo": + self.assertTrue(group_id._get_backend_name(), "gloo") + if new_backend == "nccl": + self.assertTrue(group_id._get_backend_name(), "nccl") + + self.assertEqual(rank, group[dist.get_rank(group_id)]) + self.assertEqual(len(group), dist.get_world_size(group_id)) + + # Pin device (so we avoid NCCL race conditions/deadlocks). + group_rank = dist.get_rank(group_id) + torch.cuda.set_device(group_rank) + + # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL). + tensor = _build_tensor(2, value=group_rank).cuda() + dist.broadcast(tensor, src=group[0], group=group_id) + self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu")) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_world_size(3) + @skip_if_lt_x_gpu(2) + def test_backend_group(self): + self._test_group_override_backend(self._init_group_test) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @unittest.skipIf(BACKEND == "ucc", "broken, see https://github.com/pytorch/pytorch/pull/113620") + def test_backend_full_group(self): + self._test_group_override_backend(self._init_full_group_test) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(2) + def test_new_subgroups(self): + subgroup_size = 2 + cur_subgroup, subgroups = dist.new_subgroups(subgroup_size) + + world_size = dist.get_world_size() + self.assertEqual(cur_subgroup.size(), subgroup_size) + self.assertEqual(len(subgroups), world_size / subgroup_size) + self.assertFalse(dist._rank_not_in_group(cur_subgroup)) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_group_size_exceeds_world_size(self): + with self.assertRaisesRegex(ValueError, "must not exceed"): + dist.new_subgroups(100) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_world_size_not_divisible_by_group_size(self): + with self.assertRaisesRegex( + ValueError, "The world size must be divisible by 'group_size'" + ): + dist.new_subgroups(3) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + cur_subgroup, subgroups = dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 2], [1, 3]] + ) + if device_id >= 4: + self.assertIsNone(cur_subgroup) + else: + self.assertEqual(cur_subgroup.size(), 2) 
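+ # new_subgroups_by_enumeration returns this rank's own subgroup plus the full list
+ # of created subgroups; every rank therefore sees both handles even though it is a
+ # member of only one of them.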
+ self.assertEqual(len(subgroups), 2) + if device_id == 0 or device_id == 2: + self.assertEqual(cur_subgroup, subgroups[0]) + else: + self.assertEqual(cur_subgroup, subgroups[1]) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + world_size = get_world_size(group_id) + + with self.assertRaisesRegex( + RuntimeError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 1], [world_size, 2]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_by_enumeration_negative_input_rank(self): + group, group_id, rank = self._init_global_test() + + with self.assertRaisesRegex( + ValueError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[-1, -2], [-3, -4]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_overlap_not_allowed(self): + with self.assertRaisesRegex( + ValueError, "Rank 1 has appeared in both subgroup" + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0], [1, 2], [1, 3]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_average_parameters(self): + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Sequential( + nn.Conv2d(3, 3, kernel_size=3, padding=1), + nn.ReLU(), + nn.Linear(1, 5, bias=False), + ).cuda(device_id) + # Test global model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=None + ) + # Every element will be the same as the input. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data)) + + # Test partial model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) * rank + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=group_nccl + ) + if not dist._rank_not_in_group(group_nccl): + # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * 0.5) + else: + # Every element on device not in the subgroup should remain the same. 
+ for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager_param_group(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + opt = torch.optim.SGD(model.parameters(), lr=0.1) + + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + for param_group in opt.param_groups: + for params in param_group["params"]: + # mock grad + params.grad = torch.ones_like(param.data) * rank + params.data = torch.ones_like(param.data) * rank + averager.average_parameters(opt.param_groups) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, + torch.ones_like(param.data) + * sum(range(world_size)) + / world_size, + ) + else: + # No model averaging, so the parameters are not updated. 
+ for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, torch.ones_like(param.data) * rank + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager( + self, + ): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = hierarchicalSGD.HierarchicalModelAverager( + # Run the global averaging at a period of 4, + # which is equivalent to the above periodic model averaging test case. + period_group_size_dict=OrderedDict([(period, world_size)]), + warmup_steps=warmup_steps, + ) + + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_3_level_hierarchical_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + # Set up such a hierarchical model averaging as follows: + # after the first 10 warmup steps, + # run model averaging every 2 steps within each subgroup of size 2, + # run model averaging every 4 steps within each subgroup of size 3, + # and run the global model averaging every 8 steps. + # If there is a conflict in model averaging at a step, only run the highest-level model averaging. 
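+ # Worked schedule for steps 10-24 with warmup_steps=10: steps divisible by 8
+ # (16, 24) average over the global group, steps divisible by 4 but not by 8
+ # (12, 20) average over the size-4 subgroups, and the remaining even steps
+ # (10, 14, 18, 22) average over the size-2 subgroups, which is exactly what the
+ # assertions below check.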
+ warmup_steps = 10 + subgroup_size1 = 2 + subgroup_avg_period1 = 2 + subgroup_size2 = 4 + subgroup_avg_period2 = 4 + global_avg_period = 8 + period_group_size_dict = OrderedDict( + [ + (subgroup_avg_period1, subgroup_size1), + (subgroup_avg_period2, subgroup_size2), + (global_avg_period, world_size), + ] + ) + averager = hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps + ) + self.assertEqual(dist.get_pg_count(), len(period_group_size_dict)) + + subgroup1 = averager.period_process_group_dict[subgroup_avg_period1] + subgroup2 = averager.period_process_group_dict[subgroup_avg_period2] + real_group_ranks_res1 = _get_pg_config(subgroup1)['ranks'] + real_group_ranks_res2 = _get_pg_config(subgroup2)['ranks'] + + expect_group_ranks_res1 = ( + rank // subgroup_size1 * subgroup_size1 + + np.array(list(range(subgroup_size1))) + ).tolist() + expect_group_ranks_res2 = ( + rank // subgroup_size2 * subgroup_size2 + + np.array(list(range(subgroup_size2))) + ).tolist() + self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1) + self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2) + + expected_avg_tensor_within_subgroup1 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res1) + / subgroup_size1 + ) + expected_avg_tensor_within_subgroup2 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res2) + / subgroup_size2 + ) + expected_global_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + for step in range(0, 25): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step == 16 or step == 24: + # Run global model averaging when `step` can be divided by 8. + self.assertEqual(param.data, expected_global_avg_tensor) + elif step == 12 or step == 20: + # Run model averaging within subgroup when `step` can be divided by 4 but not by 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup2) + elif step == 10 or step == 14 or step == 18 or step == 22: + # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup1) + else: + # No model averaging, so the parameters are not updated. 
+ self.assertEqual(param.data, tensor) + + # Coalescing manager (sync mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(): + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # Coalescing manager (async mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager_async(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(async_ops=True) as cm: + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + cm.wait() + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # NCCL Batch SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_nccl(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + recv_tensors = [None for _ in range(world_size)] + expected_tensors = [None for _ in range(world_size)] + + for val in ["1", "0"]: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val + for src in range(0, world_size): + send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_( + src + ) + recv_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(-1) + expected_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(rank) + recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + for src in range(0, world_size): + self.assertEqual(recv_tensors[src], expected_tensors[src]) + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_ring_exchange_nccl(self): + self._barrier() + rank = 
dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + send_tensor = _build_tensor(world_size, device_id=device_id) + recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id) + send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size) + recv_op = dist.P2POp( + dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size + ) + reqs = dist.batch_isend_irecv([send_op, recv_op]) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_self_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + p2p_op_list = [] + + if rank == 0: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, 0) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, 0) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_no_rank_zero_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + if rank == 1: + peer = 2 + elif rank == 2: + peer = 1 + + if rank in [1, 2]: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, peer) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, peer) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU with provided tags + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo_tags(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + 
continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # NCCL Batch SEND RECV Op Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + with self.assertRaisesRegex(ValueError, "^Invalid ``op``"): + send_tensor = _build_tensor(rank + 1, device_id=device_id) + send_op = dist.P2POp(dist.broadcast, send_tensor, 1) + dist.batch_isend_irecv([send_op]) + + # NCCL Batch SEND RECV p2p_op_list Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_list_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"): + dist.batch_isend_irecv([1, 2]) + + # NCCL Batch SEND RECV Mixed Backend Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_mixed_backend_err(self): + self._barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + group_gloo = dist.new_group(ranks=[0, 1], backend="gloo") + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + if rank == 0: + with self.assertRaisesRegex( + ValueError, "All ops need to use the same group" + ): + send_tensor = _build_tensor(rank + 1) + send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo) + send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl) + dist.batch_isend_irecv([send_op_gloo, send_op_nccl]) + + # NCCL SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def _test_send_recv_nccl(self, profiler_ctx=None): + # TODO: now that nccl send/recv is supported, there does not seem to + # be a need to have nccl send/recv be tested separately. 
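+            # Each rank takes one turn as the sender: it sends its tensor to every
+            # other rank, while the remaining ranks receive from the current sender
+            # and compare the result against the expected value.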
+ rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + tensor = _build_tensor(rank + 1, device_id=device_id) + profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext() + with profiler_cls as prof: + for src in range(0, world_size): + if src == rank: + # Send mode + for dst in range(0, world_size): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + expected_tensor = _build_tensor(src + 1) + output_tensor = _build_tensor( + src + 1, value=-1, device_id=device_id + ) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof, dedup_gpu_user_annotation=True) + self.assertTrue(events) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.input_shapes in expected_shapes) + + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl(self): + self._test_send_recv_nccl() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl_autograd_profiler(self): + profiler_ctx = torch.autograd.profiler.profile(record_shapes=True) + self._test_send_recv_nccl(profiler_ctx) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_nccl_torch_profiler(self): + profiler_ctx = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + record_shapes=True, + ) + self._test_send_recv_nccl(profiler_ctx) + + # SEND RECV + def _test_send_recv(self, profiler_ctx): + rank = dist.get_rank() + send_size = rank + 1 + tensor = _build_tensor(send_size) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for src in range(0, dist.get_world_size()): + if src == rank: + # Send mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + recv_size = src + 1 + expected_tensor = _build_tensor(recv_size) + output_tensor = _build_tensor(recv_size, value=-1) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks. 
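+                        # Hence world_size - 1 events are expected for each of the
+                        # send and recv event names checked here.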
+ event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.is_async) + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl" + ) + def test_send_recv(self): + self._test_send_recv(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv(profiler_ctx=torch_profiler_ctx) + + # SEND RECV ANY SOURCE + def _test_send_recv_any_source(self, profiler_ctx): + rank = dist.get_rank() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + recv_ranks = [] + irecv_ranks = [] + + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, dist.get_world_size()): + if dst == rank: + # Recv mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + + for recv in ["recv", "irecv"]: + output_tensor = _build_tensor(send_recv_size, value=-1) + + if recv == "recv": + sender = dist.recv(output_tensor) + recv_ranks.append(sender) + elif recv == "irecv": + work = dist.irecv(output_tensor) + work.wait() + sender = work._source_rank() + irecv_ranks.append(sender) + + # Assert the scalar value "sender" that should be + # equal to the rank of the sender is equal to all + # values in the received tensor. + self.assertTrue(output_tensor.eq(sender).all()) + else: + # Send mode + dist.send(tensor, dst) # recv + dist.send(tensor, dst) # irecv + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from other rank twice. + self.assertEqual( + sum(event.count for event in events), + 2 * (dist.get_world_size() - 1), + ) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + # Each rank would have 2 * (world_size - 1) sends, verify that + # globally we receive the same amount on the other end. 
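+            # Gather every rank's observed sender ranks and check that each rank
+            # appears exactly 2 * (world_size - 1) times in the combined list.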
+ recv_ranks_tensor = torch.cat( + (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0 + ) + global_recv_ranks = [ + torch.empty_like(recv_ranks_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(global_recv_ranks, recv_ranks_tensor) + global_recv_ranks_list = [] + for tensor in global_recv_ranks: + global_recv_ranks_list += tensor.tolist() + + from itertools import groupby + + global_recv_ranks_list.sort() + frequency = [ + len(list(group)) for key, group in groupby(global_recv_ranks_list) + ] + self.assertEqual(dist.get_world_size(), len(frequency)) + self.assertEqual( + [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source(self): + self._test_send_recv_any_source(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_any_source_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx) + + # SEND RECV WITH TAG + def _test_send_recv_with_tag(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, world_size): + if dst == rank: + # Recv mode + for src in range(0, world_size): + if src == rank: + continue + output_tensor = _build_tensor(send_recv_size, value=-1) + dist.recv(output_tensor, src, tag=src) + self.assertTrue(output_tensor.eq(src).all()) + else: + # Send mode + dist.send(tensor, dst, tag=rank) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks + event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, event_name) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag(self): + self._test_send_recv_with_tag(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + return 
self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_with_tag_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx) + + # ISEND + def _test_isend(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + if rank == 0: + requests = [ + dist.isend(_build_tensor(dest, 10), dest) + for dest in range(1, world_size) + ] + for request in requests: + request.wait() + self.assertTrue(request.is_completed()) + else: + tensor = _build_tensor(rank, -1) + dist.recv(tensor, 0) + self.assertEqual(tensor, _build_tensor(rank, 10)) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + expected_event_name = ( + f"{backend}:send" if rank == 0 else f"{backend}:recv" + ) + events = get_profiling_event(expected_event_name, prof) + event_count = sum(e.count for e in events) + expected_count = dist.get_world_size() - 1 if rank == 0 else 1 + self.assertEqual(expected_count, event_count) + # Event ordering is not guaranteed, so simply ensure the shapes are + # found in the following map. + expected_shapes = { + r: [[r] * 3] for r in range(1, dist.get_world_size()) + } + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, expected_event_name) + if rank == 0: + self.assertTrue( + event.input_shapes in expected_shapes.values() + ) + else: + self.assertEqual(event.input_shapes, expected_shapes[rank]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend(self): + self._test_isend(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_isend(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_isend_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + self._test_isend(profiler_ctx=torch_profiler_ctx) + + # IRECV + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support irecv" + ) + def test_irecv(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + + if rank == 0: + expected_tensors = [ + _build_tensor(src, -1) for src in range(1, world_size) + ] + requests = [ + dist.irecv(expected_tensors[src - 1], src) + for src in range(1, world_size) + ] + + for src in range(1, world_size): + requests[src - 1].wait() + self.assertTrue(requests[src - 1].is_completed()) + self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10)) + else: + tensor = _build_tensor(rank, 10) + dist.send(tensor, 0) + + self._barrier() + + # BROADCAST + def 
_test_broadcast_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + with_options=False, + ): + for dtype, value, requires_cuda in [ + (torch.float, -1e-10, False), + (torch.double, -1e-100, False), + (torch.half, -0.1, True), + (torch.int8, -2, False), + (torch.uint8, 129, False), + (torch.int, -1e5, False), + (torch.long, -1e15, False), + ]: + if requires_cuda and not cuda: + continue + for src in group: + expected_tensor = _build_tensor(src + 1, value, dtype) + if cuda: + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + if rank == src: + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", + True, + group_id.broadcast, + [expected_tensor], + opts, + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + expected_tensor, + src, + group_id, + ) + else: + tensor = _build_tensor(src + 1, -1, dtype) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", True, group_id.broadcast, [tensor], opts + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + tensor, + src, + group_id, + ) + self.assertEqual(tensor.size(), expected_tensor.size()) + self.assertEqual( + tensor.ne(expected_tensor).max(), torch.tensor(False) + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast(self): + group, group_id, rank = self._init_global_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and Nccl backend supports CUDA allReduce", + ) + @skip_if_no_gpu + def test_broadcast_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_group(self): + group, group_id, rank = self._init_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "Only NCCL backend supports high priority stream", + ) + @skip_if_no_gpu + def test_nccl_high_priority_stream(self): + group, _, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + new_port = str(MASTER_PORT + 1) + os.environ["MASTER_PORT"] = new_port + gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size()) + store, rank, size = next(gen_iterator) + store = dist.PrefixStore(new_port, store) + + opts = dist.ProcessGroupNCCL.Options() + opts.is_high_priority_stream = False + group_id = dist.ProcessGroupNCCL(store, rank, size, opts) + + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True) + + # REDUCE + def _test_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + 
worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensor = _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensor, + src, + op, + group_id, + tensor_shapes=[tensor.shape], + ) + if rank == src: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_product(self): + 
group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # REDUCE TWICE + def _test_reduce_twice_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensors = [ + _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + for i in range(2) + ] + if cuda: + for i in range(2): + tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensors[0], + src, + op, + group_id, + secondary_op_call=lambda: dist.reduce( + tensors[1], src, op, group_id + ), + tensor_shapes=[tensors[0].shape], + ) + if rank == src: + for tensor in tensors: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + 
self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum_twice(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda_twice(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports reduce_scatter_v" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_scatter_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + input_split_sizes = [] + for src in group: + input_split_sizes.append(src + 1) + start_len = sum(input_split_sizes[:rank]) + end_len = start_len + input_split_sizes[rank] + sum_len = sum(input_split_sizes) + master_value = 2 + worker_value = 10 + + for async_val in [True, False]: + tensor = _build_tensor(sum_len, worker_value, device_id=device_id) + tensor[start_len:end_len].fill_(master_value) + out_tensor = ( + torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(-1) + .cuda(device_id) + ) + + req = dist.reduce_scatter( + out_tensor, + list(torch.split(tensor, input_split_sizes)), + dist.ReduceOp.SUM, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = 2 + (10 * (len(group) - 1)) + expected_tensor = torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test reduce_scatter_tensor accepting single tensor as input + def _reduce_scatter_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + tensor_shapes = [tensor_out.shape] + self.call_dist_op( + ":reduce_scatter_tensor", + False, + dist.reduce_scatter_tensor, + tensor_out, + tensor_in, + dist.ReduceOp.SUM, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor" + ) + @skip_if_no_gpu + def test_reduce_scatter_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_out = torch.zeros(size, dtype=torch.int64) + + # Concatenated input + tensor_in = torch.arange(len(group) * size) + 
tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + # Stacked input + tensor_in = torch.reshape(tensor_in, (len(group), size)) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + # Should be the same as the result in concatenated case + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def call_dist_op( + self, + profiling_title_postfix, + is_async, + op, + *args, + expect_event=True, + secondary_op_call=None, + profile_cuda=False, + tensor_shapes=None, + **kwargs, + ): + op_calls = [lambda: op(*args, **kwargs)] + if secondary_op_call is not None: + op_calls.append(secondary_op_call) + + autograd_profiler_ctx = torch.autograd.profiler.profile( + use_cuda=profile_cuda, record_shapes=True + ) + + # TODO: move this test to use torch.profiler once kineto issues are + # fixed internally. + with autograd_profiler_ctx as prof: + works = [op_call() for op_call in op_calls] + if is_async: + for work in works: + work.wait() + + if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS: + # We are only interested in the backend's implementation not the dispatcher wrapper. + events = get_profiling_event( + dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx + ) + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if dist.get_debug_level() != dist.DebugLevel.DETAIL: + self.assertEqual(len(events), len(op_calls)) + for e in events: + self.assertTrue(e.is_async) + self.assertEqual(e.count, 1) + self.assertGreaterEqual(e.cpu_time, 0) + # Verify tensor shapes if given + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if ( + tensor_shapes is not None + and dist.get_debug_level() != dist.DebugLevel.DETAIL + ): + self.assertEqual( + e.input_shapes, + tensor_shapes, + f"event shape: {e.input_shapes} vs tensor {tensor_shapes}", + ) + + # ALL REDUCE + def _test_all_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + async_op=False, + ): + for src in group: + curr_value = master_value if rank == src else worker_value + + tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if tensor.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor).shape] + else: + tensor_shapes = [tensor.shape] + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + tensor_shapes=tensor_shapes, + ) + # Currently, only Gloo backend has profiling tested with CUDA enabled. + # Only run cuda profiling test for one rank to speed up since + # running with different src_rank does not affect the correctness. 
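+                # The ``src == 0`` condition below limits the profiled call to a
+                # single iteration of the loop over ``src``.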
+ if ( + src == 0 + and cuda + and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS + ): + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + profile_cuda=True, + tensor_shapes=tensor_shapes, + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_async(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_async(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_complex_unsupported_ops(self): + unsupported_ops = [ + dist.ReduceOp.MAX, + dist.ReduceOp.MIN, + dist.ReduceOp.PRODUCT, + dist.ReduceOp.BAND, + dist.ReduceOp.BOR, + dist.ReduceOp.BXOR, + ] + group, group_id, rank = self._init_global_test() + for unsupported_op in unsupported_ops: + with self.assertRaisesRegex( + ValueError, "all_reduce does not support" + ): + dist.all_reduce( + _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_complex(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + 
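+        # The remaining all_reduce tests exercise the same helper with the
+        # PRODUCT, MIN and MAX ops on the global, sub- and full groups.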
@skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # SPARSE ALL REDUCE + def _test_sparse_all_reduce_sum(self, fn): + group, group_id, rank = 
self._init_global_test() + + tests = simple_sparse_reduce_tests( + rank, dist.get_world_size(), num_inputs=1 + ) + for (inputs, outputs) in tests: + tensors = [fn(input) for input in inputs] + dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id) + self.assertEqual(tensors[0], outputs[0]) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + def test_sparse_all_reduce_sum(self): + self._test_sparse_all_reduce_sum(lambda t: t) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + @skip_if_no_gpu + def test_sparse_all_reduce_sum_cuda(self): + self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda()) + + # ALL REDUCE - COALESCED + @staticmethod + def _all_reduce_coalesced_sum_test_cases(group_size): + return ( + [2, 3, complex(2, 3)], + [10, 11, complex(10, 11)], + [ + 2 + 10 * (group_size - 1), + 3 + 11 * (group_size - 1), + complex(2, 3) + complex(10, 11) * (group_size - 1), + ], + [torch.float, torch.float, torch.cfloat], + ) + + @staticmethod + def _all_reduce_coalesced_product_test_cases(group_size): + return ( + [1, 2], + [3, 4], + [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_min_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [1, 3], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_max_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [2, 4], + [torch.float, torch.float], + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_coalesced_max_complex_unsupported(self): + group, group_id, rank = self._init_global_test() + with self.assertRaisesRegex(ValueError, "all_reduce does not support"): + dist.all_reduce_coalesced( + [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id + ) + + def _test_all_reduce_coalesced_helper( + self, + group, + group_id, + rank, + op, + cuda=False, + rank_to_GPU=None, + ): + test_case_func = { + dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases, + dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases, + dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases, + dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases, + }[op] + + master_values, worker_values, expected_values, dtypes = test_case_func( + len(group) + ) + + for src in group: + curr_values = master_values if rank == src else worker_values + tensors = [ + _build_tensor(src + 1, val, dtype=dtype) + for dtype, val in zip(dtypes, curr_values) + ] + if cuda: + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + tensor_shapes = [] + for tensor in tensors: + if tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(tensor).shape) + else: + tensor_shapes.append(tensor.shape) + self.call_dist_op( + ":all_reduce", + False, + dist.all_reduce_coalesced, + tensors, + op, + group_id, + tensor_shapes=tensor_shapes, + ) + expected_tensors = [ + _build_tensor(src + 1, expected_value, dtype=dtype) + for dtype, expected_value in zip(dtypes, expected_values) + ] + self.assertEqual(tensors, expected_tensors) + + self._barrier() + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + cuda=False, + rank_to_GPU=None, + ) + + 
@require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + # SCATTER + def _test_scatter_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, -1, dtype=dtype) + expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = ( + [_build_tensor(dest + 1, i, dtype=dtype) for i in group] + if rank == dest + else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) 
for t in tensors] + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(t).shape for t in tensors] + else: + tensor_shapes = [t.shape for t in tensors] + self.call_dist_op( + ":scatter", + False, + dist.scatter, + tensor, + src=dest, + scatter_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(tensor, expected_tensor) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify scatter_list argument only on source rank. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, src=0, scatter_list=scatter_list) + else: + dist.scatter(output, src=0) + self.assertEqual(output, one * rank) + + # Don't specify src argument. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, scatter_list=scatter_list) + else: + dist.scatter(output) + self.assertEqual(output, one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_complex(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_scatter_group(self): + group, group_id, rank = self._init_group_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_scatter_helper(group, group_id, rank) + + # GATHER + def _test_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank) + tensors = ( + [_build_tensor(dest + 1, -1) for i in 
group] if rank == dest else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + self.call_dist_op( + ":gather", + False, + dist.gather, + tensor, + dst=dest, + gather_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None, + ) + if rank == dest: + expected_tensors = [_build_tensor(dest + 1, i) for i in group] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify gather_list argument only on destination rank. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, dst=0, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank, dst=0) + + # Don't specify dst argument. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather(self): + group, group_id, rank = self._init_global_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_gather_helper(group, group_id, rank) + + # ALL GATHER + def _test_all_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group] + allgather = dist.all_gather + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if tensors[0].dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensors[0]).shape] + else: + tensor_shapes = [tensors[0].shape] + self.call_dist_op( + ":all_gather", + False, + allgather, + tensors, + tensor, + group_id, + False, + tensor_shapes=tensor_shapes, + ) + + expected_tensors = [ + _build_tensor(dest + 1, i, dtype=dtype) for i in group + ] + for t1, t2 in zip(tensors, 
expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports all_gather_v" + ) + @skip_if_no_gpu + def test_all_gather_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + output_split_sizes = [] + for dst in group: + output_split_sizes.append(dst + 1) + sum_len = sum(output_split_sizes) + value = 2 + + for async_val in [True, False]: + tensor = ( + torch.empty( + output_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(value) + .cuda(device_id) + ) + out_tensor = _build_tensor(sum_len, -1, device_id=device_id) + + req = dist.all_gather( + list(torch.split(out_tensor, output_split_sizes)), + tensor, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = value + expected_tensor = _build_tensor( + sum_len, expected_value, device_id=device_id + ) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test all_gather accepting single tensor as output + def _all_gather_into_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + if tensor_out.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor_in).shape] + else: + tensor_shapes = [tensor_in.shape] + self.call_dist_op( + ":all_gather_into_tensor", + False, + dist.all_gather_into_tensor, + tensor_out, + tensor_in, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA 
all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_cat_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Concatenated output + tensor_out = torch.ones([len(group) * size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Concatenate all blocks into a bigger tensor + expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_stack_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Stacked output + tensor_out = torch.ones([len(group), size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Stack all blocks into a bigger tensor + expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def _run_all_gather_coalesced_and_verify( + self, output_tensor_lists, input_tensors, expected_tensors, group_id + ): + """ + Helper that runs all_gather_coalesced and returns true if output + matches expectations. + """ + tensor_shapes = [] + for input_tensor in input_tensors: + if input_tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(input_tensor).shape) + else: + tensor_shapes.append(input_tensor.shape) + self.call_dist_op( + ":all_gather", + False, + dist.all_gather_coalesced, + output_tensor_lists, + input_tensors, + group_id, + tensor_shapes=tensor_shapes, + ) + + for l1, l2 in zip(output_tensor_lists, expected_tensors): + for t1, t2 in zip(l1, l2): + if not torch.equal(t1, t2): + return False + return True + + def _test_all_gather_coalesced_helper( + self, group, group_id, rank, dtype=torch.float + ): + # TODO: Instead we should probably go through _rank_not_in_group + # mechanism to disable sending tensors + if group_id is not None: + for test_case_id in range(2, 5): + # Make sure we create tensors of incompatible sizes, e.g. + # [1], [2x2], [3x3x3] ... 
to be sent in one batch + input_tensors = [ + _build_multidim_tensor( + tensor_id, tensor_id, rank + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + output_tensor_lists = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, -1, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for _ in group + ] + expected_tensors = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for rank_iter in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensor_lists, input_tensors, expected_tensors, group_id + ), "output tensors do not match expected outputs" + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_simple(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_with_empty(self): + group, group_id, rank = self._init_global_test() + input_tensors = [ + rank * torch.ones([2, 2]), + torch.ones([0]), + (rank + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + output_tensors_lists = [ + [ + -1 * torch.ones([2, 2]), + -1 * torch.ones([0]), + -1 * torch.ones([3, 3]), + -1 * torch.ones([0]), + -1 * torch.ones([0]), + ] + for _ in group + ] + expected_tensors = [ + [ + r * torch.ones([2, 2]), + torch.ones([0]), + (r + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + for r in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensors_lists, input_tensors, expected_tensors, group_id + ) + self._barrier() + + # AllToAll + def _test_all_to_all_single_equal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_tensor = torch.ones([size, size], dtype=dtype) * rank + expected_tensor = torch.cat( + [torch.ones([1, size], dtype=dtype) * i for i in group] + ) + out_tensor = torch.ones([size, size], dtype=dtype) * -1 + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) 
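+ # Editor's note (illustrative, not part of the original test): in the
+ # equal-split form of all_to_all_single, each rank sends row block j of
+ # in_tensor to rank j and receives rank i's block into row block i of
+ # out_tensor, which is why expected_tensor above is the concatenation of
+ # per-rank constants. A minimal sketch of the call, assuming a default
+ # group of world_size ranks:
+ #   inp = torch.ones(world_size, 4) * dist.get_rank()
+ #   out = torch.empty_like(inp)
+ #   dist.all_to_all_single(out, inp)  # out[i] is now filled with i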
+ if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(in_tensor).shape] + else: + tensor_shapes = [in_tensor.shape] + self.call_dist_op( + ":all_to_all", + False, + dist.all_to_all_single, + out_tensor, + in_tensor, + group=group_id, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_single_unequal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + out_splits = [rank + 1 for _ in group] + in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank + out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype) + expected_tensor = torch.cat( + [torch.ones([rank + 1, size], dtype=dtype) * i for i in group] + ) + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + dist.all_to_all_single( + out_tensor, in_tensor, out_splits, in_splits, group=group_id + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + in_tensors = [ + torch.ones([in_splits[i], size], dtype=dtype) * rank + for i, _ in enumerate(group) + ] + out_tensors = [ + torch.ones([(rank + 1), size], dtype=dtype) for _ in group + ] + expected_tensors = [ + torch.ones([rank + 1, size], dtype=dtype) * i for i in group + ] + if cuda: + in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors] + expected_tensors = [ + t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors + ] + out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors] + dist.all_to_all(out_tensors, in_tensors, group=group_id) + for t1, t2 in zip(out_tensors, expected_tensors): + self.assertEqual(t1, t2) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU 
all_to_all_single" + ) + def test_all_to_all_single_unequal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm_multiprocess + def test_all_to_all_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm_multiprocess + def test_all_to_all_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group(self): + group, group_id, rank = 
self._init_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + @skip_if_small_worldsize + def test_all_to_all_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_small_worldsize + @skip_if_rocm_multiprocess + def test_all_to_all_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm_multiprocess + def test_all_to_all_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + # BARRIER + def _test_barrier_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + WAIT_TIME = 0.3 # seconds + + for dest in group: + expected_time = torch.DoubleTensor(1).fill_(0.0) + if cuda: + expected_time = expected_time.cuda(rank_to_GPU[rank][0]) + if dest == rank: + 
expected_time.fill_(time.time() + WAIT_TIME) + dist.broadcast(expected_time, dest, group_id) + time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer + dist.barrier(group_id) + else: + dist.broadcast(expected_time, dest, group_id) + dist.barrier(group_id) + self.assertGreaterAlmostEqual( + float(time.time()), + float(expected_time[0]), + msg="destination rank: %d, my rank: %d" % (dest, rank) + + " (if you see this failure, please report in #14554)", + ) + + # Use higher timeout for the instance where the test runs + # against a subgroup and uses a CUDA tensor for expected time. + # The CUDA initialization for the participating processes can + # take long enough for the barrier timeout to trigger on the + # process that doesn't participate in the group. + self._barrier(timeout=20) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + def test_barrier_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND == "mpi", "MPI doesn't supports GPU barrier" + ) + def test_barrier_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier(self): + group, group_id, rank = self._init_global_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_group(self): + group, group_id, rank = self._init_group_test() + self._test_barrier_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["cpu barrier"], + f"{BACKEND} does not support CPU barrier", + ) + def test_barrier_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_barrier_helper(group, group_id, rank) + + def _model_step(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad = None + + def _model_step_with_zero_grad(self, model): + for param in model.parameters(): + if param.grad is not None: + with torch.no_grad(): + param += param.grad + param.grad.requires_grad_(False) + param.grad.zero_() + + def _prepare_dummy_data(self, local_bs): + # global_bs for DDP should be divisible by WORLD_SIZE + world_size = int(os.environ["WORLD_SIZE"]) + global_bs = world_size * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + return global_bs, input_cpu, target, loss + + # END TO 
END TEST FOR DISTRIBUTEDDATAPARALLEL + def _test_DDP_helper( + self, model, input_var, target, loss, scale_factor=1.0, memory_format=None + ): + model.train() + output = model(input_var) + l = loss(output, target) * scale_factor + l.backward() + if memory_format is not None: + self.assertTrue(output.is_contiguous(memory_format=memory_format)) + + def _assert_equal_param(self, param_gpu, param_DDP): + self.assertEqual(len(param_gpu), len(param_DDP)) + for p_gpu, p_DDP in zip(param_gpu, param_DDP): + self.assertEqual(p_gpu, p_DDP) + + def _test_DDP_niter( + self, + model_base, + model_DDP, + input, + target, + loss, + local_bs, + rank, + batch_size, + test_save, + offset=None, + world_size=0, + zero_grad=False, + memory_format=None, + n_iter=5, + ): + for idx in range(n_iter): + # single cpu/gpu training + self._test_DDP_helper( + model_base, input, target, loss, memory_format=memory_format + ) + + if offset is None: + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + world_size * local_bs / batch_size if world_size != 0 else 1, + memory_format=memory_format, + ) + + # Update weights and run a second iteration to shake out errors + if zero_grad: + self._model_step_with_zero_grad(model_base) + self._model_step_with_zero_grad(model_DDP) + else: + self._model_step(model_base) + self._model_step(model_DDP) + self._assert_equal_param( + list(model_base.parameters()), list(model_DDP.module.parameters()) + ) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + # save the model in the middle and reload + if test_save and idx == 2 and INIT_METHOD.startswith("file://"): + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp, weights_only=False) + else: + torch.save(model_DDP, tmp.name) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp.name, weights_only=False) + + with tempfile.TemporaryFile() as tmp_file: + torch.save(model_DDP, tmp_file) + tmp_file.seek(0) + # weights_only=False as this is legacy code that saves the model + saved_model = torch.load(tmp_file, weights_only=False) + for k in model_DDP.state_dict(): + self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k]) + + def _test_DistributedDataParallel( + self, + gpu_subset, + rank, + output_device=None, + gradient_as_bucket_view=False, + static_graph=False, + set_static_graph_twice=False, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = DDP_NET + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = copy.deepcopy(model) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=gpu_subset, + gradient_as_bucket_view=gradient_as_bucket_view, + static_graph=static_graph, + ) + + if set_static_graph_twice: + model_DDP._set_static_graph() + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp, weights_only=False) + 
else: + torch.save(model_DDP, tmp.name) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp.name, weights_only=False) + + # dummy data initialization + local_bs = len(gpu_subset) + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False): + # Run a simple end to end DDP-CPU model, use result of single node + # model as baseline + group, group_id, rank = self._init_global_test() + + # cpu training setup + model_base = DDP_NET + + # DDP-CPU training setup + model_DDP = copy.deepcopy(model_base) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, gradient_as_bucket_view=gradient_as_bucket_view + ) + + # dummy data initialization + local_bs = 2 + global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs) + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_base, + model_DDP, + input_cpu, + target, + loss, + local_bs, + rank, + global_bs, + False, + zero_grad=True, + ) + self._barrier() + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU(self): + self._test_DistributedDataParallelCPU() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_DistributedDataParallelCPU_grad_is_view(self): + self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_DistributedDataParallel_requires_grad(self): + # a module without gradients shouldn't be accepted + self.assertRaises( + RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module()) + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_zero_output_features(self): + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.net1 = nn.Linear(10, 10) + self.relu = nn.ReLU() + self.net2 = nn.Linear(10, 0) + + model = ToyModel().to(self.rank) + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + + @skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test") + def test_ddp_create_graph(self): + class Model(nn.Module): + def __init__(self) -> None: + super().__init__() + self.p = nn.Parameter(torch.tensor(1.0)) + + def forward(self): + return self.p.pow(2) + + model = Model() + ddp_model = torch.nn.parallel.DistributedDataParallel(model) + for _ in range(6): + # Verify DDP doesn't throw when ran with create_graph=True. + # Although we do warn about potential issues, please see + # https://github.com/pytorch/pytorch/issues/63929 for details. + ddp_model().backward(create_graph=True) + # grad tensors should require grad. 
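+ # Editor's note: backward(create_graph=True) retains the graph of the
+ # backward pass so higher-order gradients could be taken; DDP only warns
+ # about this usage (see the issue linked above) instead of raising, and the
+ # assertion below just confirms the parameters still require grad afterwards.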
+ self.assertTrue( + all(param.requires_grad for param in ddp_model.parameters()) + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedDataParallel_non_default_stream(self): + stream = torch.cuda.Stream(self.rank) + rank = self.rank + with torch.cuda.stream(stream): + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank] + ) + for i in range(1000): + # Clear gradients manually + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net.module.weight.grad + avg = grad.clone() + # All-reducing the gradient averages should give us the gradient + # average. If not, then one of the workers has not correctly + # written back the averaged gradient before this all-reduce call. + dist.all_reduce(avg) + world_size = int(os.environ["WORLD_SIZE"]) + avg.div_(world_size) + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual( + avg[0, 0], + expected_grad, + msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_comm_hook_logging(self): + hooks = [ + default.allreduce_hook, + default.fp16_compress_hook, + powerSGD.powerSGD_hook, + powerSGD.batched_powerSGD_hook, + quantization_hooks.quantization_pertensor_hook, + quantization_hooks.quantization_perchannel_hook, + ] + + cpp_builtin_hooks = [ + dist.BuiltinCommHookType.ALLREDUCE, + dist.BuiltinCommHookType.FP16_COMPRESS, + ] + + for hook in hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model.register_comm_hook(None, hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__) + + for hook in cpp_builtin_hooks: + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + ddp_model._register_builtin_comm_hook(hook) + ddp_logging_data = ddp_model._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook)) + + # No hook registered + ddp_model = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1, bias=False).cuda(self.rank), + device_ids=[self.rank], + ) + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Hook not registered yet, so should be empty + self.assertEqual(ddp_logging_data.get("comm_hook"), None) + # After second forward pass, hook should still be empty string + for i in range(2): + inp = torch.ones(1, 1, device=self.rank) + loss = 
ddp_model(inp).sum() + loss.backward() + + ddp_logging_data = ddp_model._get_ddp_logging_data() + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. + self.assertEqual(ddp_logging_data.get("comm_hook", ""), "") + + def _test_ddp_hook_with_optimizer_parity( + self, + grad_as_bucket_view, + static_graph, + optim_cls, + optimize_subset, + *functional_optim_args, + **functional_optim_kwargs, + ): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + models_to_test = [ + (LargeNet(), torch.randn(1, 1000).cuda()), + ] + if HAS_TORCHVISION: + models_to_test.append( + (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda()) + ) + for (model, inp) in models_to_test: + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + # Create DDP model that runs optimizer in fused fashion. + ddp_model_with_optimizer_hook = ( + torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + ) + + # Create DDP model with no hook that does optimizer after + # backward. + ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_as_bucket_view, + static_graph=static_graph, + ) + hook_params = ddp_model_with_optimizer_hook.parameters() + no_hook_params = ddp_model_with_no_hook.parameters() + if optimize_subset: + hook_params = list(hook_params) + no_hook_params = list(no_hook_params) + self.assertGreater(len(hook_params), 0) + hook_params = [hook_params[0]] + no_hook_params = [no_hook_params[0]] + + # Register a fused optimizer that will run optimizer in step + # with allreduce. + + if optimize_subset: + # API where optim_params is specified. + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + optim_params=hook_params, + **functional_optim_kwargs, + ) + else: + # API where optim_params is omitted + ddp_model_with_optimizer_hook._register_fused_optim( + optim_cls, + *functional_optim_args, + **functional_optim_kwargs, + ) + + optimizer_no_hook = optim_cls( + no_hook_params, + *functional_optim_args, + **functional_optim_kwargs, + ) + + # Verify parameters are equal initially. + for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Save old parameters to later verify optimizer modified them. + opt_hook_init_params = copy.deepcopy( + list(ddp_model_with_optimizer_hook.parameters()) + ) + + # Run optimizer with hook model. + for i in range(6): + ddp_model_with_optimizer_hook.zero_grad() + out = ddp_model_with_optimizer_hook(inp) + loss = out.sum() + loss.backward() + + dist.barrier() + + # Run regular model. + for i in range(6): + ddp_model_with_no_hook.zero_grad() + out = ddp_model_with_no_hook(inp) + loss = out.sum() + loss.backward() + optimizer_no_hook.step() + + dist.barrier() + + # Now verify parameters are equal. 
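+ # Editor's note: both models see the same per-rank input and the same
+ # averaged gradients; the fused optimizer merely runs its step inside the
+ # allreduce hook (per the comment where it is registered above), so with
+ # deterministic cudnn enabled the two parameter sets are expected to stay
+ # equal after the six iterations.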
+ for hook_param, allreduce_param in zip( + ddp_model_with_optimizer_hook.parameters(), + ddp_model_with_no_hook.parameters(), + ): + self.assertEqual(hook_param, allreduce_param) + + # Verify optimizer modified appropriate parameter set, + # otherwise they'd be trivially equal above. + if optimize_subset: + self.assertNotEqual( + opt_hook_init_params[0], + next(iter(ddp_model_with_optimizer_hook.parameters())), + ) + # Untouched params should be equal + self.assertEqual( + opt_hook_init_params[1:], + list(ddp_model_with_optimizer_hook.parameters())[1:], + ) + else: + self.assertNotEqual( + opt_hook_init_params, + list(ddp_model_with_optimizer_hook.parameters()), + ) + dist.barrier() + + """ + # Commenting out the following 3 tests as they cause Sandcastle jobs to fail + # Failure signature: + # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw + + from torch.testing._internal.common_utils import parametrize + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("grad_as_bucket_view", [True, False]) + @parametrize("static_graph", [True, False]) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adamw( + self, + grad_as_bucket_view, + static_graph, + optimize_subset, + ): + adamw_lr = 1e-2 + adamw_betas = (0.9, 0.99) + adamw_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + grad_as_bucket_view, + static_graph, + torch.optim.AdamW, + optimize_subset, + adamw_lr, + betas=adamw_betas, + eps=adamw_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset): + adam_lr = 1e-2 + adam_betas = (0.9, 0.99) + adam_eps = 1e-6 + self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static graph + torch.optim.Adam, + optimize_subset, + adam_lr, + betas=adam_betas, + eps=adam_eps, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl" or BACKEND == "ucc", + "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", + ) + @skip_if_lt_x_gpu(2) + @parametrize("optimize_subset", [True, False]) + def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset): + sgd_lr = 1e-2 + sgd_momentum = 0.9 + sgd_weight_decay = 0.01 + # Not testing grad_as_bucket_view and static_graph as they are + # tested in AdamW test above. 
+ self._test_ddp_hook_with_optimizer_parity( + True, # grad as bucket view + False, # static_graph + torch.optim.SGD, + optimize_subset, + sgd_lr, + momentum=sgd_momentum, + weight_decay=sgd_weight_decay, + ) + """ + + @skip_if_lt_x_gpu(2) + def test_get_data_parallel_params(self): + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank] + ) + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params( + model, named_params=True + ) + for name, _ in dp_params: + self.assertNotEqual(f"module.{params_to_ignore[0]}", name) + + # test named_params=False, just check if returns the expected + # no of parameters. + num_ddp_params = len(list(model.parameters())) - 1 + count = 0 + dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(model, named_params=False) + for _ in dp_params: + count += 1 + self.assertEqual(count, num_ddp_params) + + def _test_ddp_apply_optim_in_backward( + self, + optim_cls, + optim_kwargs, + init_before, + gradient_as_bucket_view=True, + ): + # Need to seed to ensure inputs are unique across rank. Otherwise, + # allreduce won't have any effect. + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + torch.cuda.set_device(self.rank) + + # Test a simple linear as well as a ResNet model. + models_to_test = [ + nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda() + ] + if HAS_TORCHVISION: + models_to_test.append(torchvision.models.resnet50().cuda()) + + for j, model in enumerate(models_to_test): + model_optim_in_bwd = copy.deepcopy(model) + model = nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + optim = optim_cls(model.parameters(), **optim_kwargs) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + model_optim_in_bwd = nn.parallel.DistributedDataParallel( + model_optim_in_bwd, + device_ids=[self.rank], + gradient_as_bucket_view=gradient_as_bucket_view, + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=optim_cls, + params=model_optim_in_bwd.parameters(), + optimizer_kwargs=optim_kwargs, + ) + + for p1, p2 in zip(model.parameters(), model_optim_in_bwd.parameters()): + self.assertEqual(p1, p2, "Parameters not initially equal!") + # Enable determinism in cudnn operators + with torch.backends.cudnn.flags( + enabled=True, deterministic=True, benchmark=False + ): + for i in range(8): + inp = ( + torch.randn(1, 3, 1000, 1000, device="cuda") + if j == 1 + else torch.randn(10, 3, device="cuda") + ) + model(inp).sum().backward() + optim.step() + model_optim_in_bwd( + inp + ).sum().backward() # runs optimizer as well + for p1, p2 in zip( + model.parameters(), model_optim_in_bwd.parameters() + ): + self.assertEqual( + p1, p2, f"Params not equal at iteration {i}" + ) + self.assertTrue( + p2.grad is None, + f"Optim in backward grad is not None at {i}", + ) + + # set_to_none for regular optimizer to match in backward + # case. 
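+ # Editor's note: _apply_optimizer_in_backward applies the step during the
+ # backward pass and leaves `.grad` unset (hence the `p2.grad is None` check
+ # above), so zeroing the baseline optimizer with set_to_none=True keeps both
+ # models free of stale .grad tensors between iterations.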
+ optim.zero_grad(set_to_none=True) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward(self): + for optim_cls, init_before in itertools.product( + [torch.optim.SGD, torch.optim.Adam], [True, False] + ): + with self.subTest(optim_cls=optim_cls): + self._test_ddp_apply_optim_in_backward( + optim_cls=optim_cls, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self): + for init_before in [True, False]: + self._test_ddp_apply_optim_in_backward( + optim_cls=torch.optim.SGD, + optim_kwargs={"lr": 0.03}, + init_before=init_before, + gradient_as_bucket_view=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_apply_optim_in_backward_ignored_params(self): + torch.cuda.set_device(self.rank) + for init_before in [True, False]: + with self.subTest(init_before=init_before): + torch.manual_seed(self.rank) + torch.cuda.manual_seed(self.rank) + model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + if init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + if not init_before: + _apply_optimizer_in_backward( + optimizer_class=torch.optim.SGD, + params=model.parameters(), + optimizer_kwargs={"lr": 0.03}, + ) + inp = torch.randn(1, 10) + a, b = net(inp) + (a.transpose(0, 1) @ b).sum().backward() + # a.weight did not go through allreduce, so optimizer acted on local + # gradient, which should be different across ranks. Remaining params + # should be equal. + models = [None for _ in range(dist.get_world_size())] + dist.all_gather_object(models, model) + rank0_model, remainder = models[0], models[1:] + for m in remainder: + self.assertNotEqual(rank0_model.a.weight, m.a.weight) + self.assertEqual( + list(rank0_model.b.parameters()), list(m.b.parameters()) + ) + self.assertEqual(rank0_model.a.bias, m.a.bias) + + def _get_fp16_config(self) -> _MixedPrecision: + return _MixedPrecision( + param_dtype=torch.float16, + reduce_dtype=torch.float16, + buffer_dtype=torch.float16, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_ignored_params(self): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + model = TwoLinLayerNet() + model.register_buffer("buffer", torch.ones(5)) + # Parameters to ignore are in the format {module_name}.{param_name} + to_ignore = ["a.weight", "buffer"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, to_ignore, + ) + mp_config = self._get_fp16_config() + net = torch.nn.parallel.DistributedDataParallel( + model.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=True, + ) + to_ignore = [f"module.{name}" for name in to_ignore] + expected_ignored = len(to_ignore) + n_ignored = 0 + # ignored params should not have _mp_param or _fp_param fields. 
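+ # Editor's note: as exercised by the loop below, parameters handled by DDP
+ # native mixed precision carry a reduced-precision `_mp_param` plus a
+ # float32 `_fp_param`, while anything on the ignore list keeps neither
+ # attribute and is left out of the precision machinery.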
+ for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()): + if n in to_ignore: + n_ignored += 1 + self.assertFalse(hasattr(p, '_mp_param')) + self.assertFalse(hasattr(p, '_fp_param')) + else: + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + self.assertEqual(expected_ignored, n_ignored) + + def _test_ddp_native_mixed_precision( + self, gradient_as_bucket_view, set_grad_to_none + ): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + inp = torch.randn(10, 1) + mp_config = self._get_fp16_config() + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.m = torch.nn.Linear(1, 5) + self.register_buffer('buffer', torch.randn(1, 2)) + self.p = torch.nn.Parameter( + torch.randn(10, 5), requires_grad=False + ) + + def forward(self_, x): # noqa: B902 + params = self_.m.parameters() + for p in params: + self.assertEqual(mp_config.param_dtype, p.dtype) + + self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype) + + self.assertEqual(mp_config.param_dtype, x.dtype) + return self_.m(x) + self_.p + + m = MyModel() + + net = torch.nn.parallel.DistributedDataParallel( + m.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + # Buffers are casted in constructor. + self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype) + # Each param should have an mp_param in the lower precision, and + # an fp_param in the higher precision. + for p in net.parameters(): + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + for i in range(6): + loss = net(inp).sum() + loss.backward() + # Verify gradient synchronization and params and grads are fp32. 
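+ # Editor's note (interpretation): the forward pass runs in float16 per
+ # mp_config (as MyModel.forward asserts), but the exposed parameters and
+ # their .grad fields stay float32, so the all_gather below is expected to
+ # show identical fp32 gradients on every rank once DDP has averaged them.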
+ for n, param in net.named_parameters(): + self.assertEqual(param.dtype, torch.float32) + if param.grad is None: + assert n == 'module.p' # Only param that doesn't require grad + else: + self.assertEqual(param.grad.dtype, torch.float32) + tensor_list = [ + torch.zeros_like(param.grad) + for _ in range(dist.get_world_size(net.process_group)) + ] + dist.all_gather(tensor_list, param.grad) + g, rest = tensor_list[0], tensor_list[1:] + self.assertEqual(g.dtype, torch.float32) + for g_ in rest: + self.assertEqual(g_.dtype, torch.float32) + self.assertEqual(g, g_) + net.zero_grad(set_to_none=set_grad_to_none) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=False, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100): + rank = self.rank + m = torch.nn.Linear(1, 5) + try: + process_group = state.process_group + except AttributeError: + process_group = state + + net_with_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + net_with_hook.register_comm_hook(state=state, hook=hook) + net_without_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + for i in range(100): + # Clear gradients manually. + for g in [ + net_without_hook.module.weight.grad, + net_with_hook.module.weight.grad, + ]: + if g is not None: + g.requires_grad_(False) + g.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net_without_hook(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net_without_hook.module.weight.grad + avg = grad.clone() + expected_grad = ( + sum(i for i in range(dist.get_world_size())) / dist.get_world_size() + ) + loss_hook = net_with_hook(batch).sum() + loss_hook.backward() + grad_hook = net_with_hook.module.weight.grad + avg_hook = grad_hook.clone() + + if i < num_validated_iters: + # Verify hook grad with expected. 
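+ # Editor's note: each rank feeds the scalar `rank` through a bias-free
+ # Linear(1, 5), so its local weight gradient equals `rank`; after DDP's
+ # mean allreduce every rank should hold sum(range(world_size)) / world_size,
+ # which is exactly the `expected_grad` computed above.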
+ self.assertEqual( + avg_hook[0, 0].item(), + expected_grad, + msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}", + ) + # Verify hook grad with vanilla allreduce + self.assertEqual( + avg_hook[0, 0], + avg[0, 0], + msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce(self): + self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce_process_group(self): + # process_group is passed in to both DDP and comm. hook + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)] + process_group = torch.distributed.new_group(gpus) + self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_powerSGD(self): + for warm_start in [True, False]: + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=2, + warm_start=warm_start, + ) + self._test_ddp_hook_parity( + state=powersgd_state, hook=powerSGD.powerSGD_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_but_pass_in_sandcastle_if( + NO_MULTIPROCESSING_SPAWN, + "Disabled for environments that \ + don't support multiprocessing with spawn start method", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_post_localSGD(self): + # Although we start run local SGD at iteration 10, since we still use the global process group to run it, + # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + # Only validate the warmup iterations before local SGD is applied, + # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all. + # Note that in practice a model averager has to be applied to run model averaging, + # so local gradient averaging is not necessary. + start_localSGD_iter = 10 + state = post_localSGD.PostLocalSGDState( + process_group=None, + subgroup=dist.group.WORLD, + start_localSGD_iter=start_localSGD_iter, + post_local_gradient_allreduce=False, + ) + self._test_ddp_hook_parity( + state=state, + hook=post_localSGD.post_localSGD_hook, + num_validated_iters=start_localSGD_iter, + ) + + # When `subgroup` is None, it is equivalent to the subgroup on the each node. 
+ # For this single-node test environment, the intra-node process group is equivalent to + # the global process group. + if self.world_size == dist.get_world_size(): + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + # Since we start local SGD later than the total number of 100 iterations, + # no local SGD actually is executed, and we don't even need to provide a subgroup for this case. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=1000 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + def _prepare_single_device_module( + self, + rank, + process_group, + devices, + device_ids, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + device = devices[0] if devices else torch.device("cuda:%d" % rank) + ddp_model = DistributedDataParallel( + copy.deepcopy(model).to(device), + device_ids=device_ids, + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + + model.to(device) + + input = torch.randn(global_batch_size, 2).to(device) + target = torch.randn(global_batch_size, 4).to(device) + + return model, ddp_model, input, target + + def _prepare_cpu_module( + self, + process_group, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + ddp_model = DistributedDataParallel( + copy.deepcopy(model), + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + input = torch.randn(global_batch_size, 2) + target = torch.randn(global_batch_size, 4) + return model, ddp_model, input, target + + def _test_accumulate_gradients_no_sync( + self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False + ): + """ + This is the recommended way to implement accumulate grads. + If ``ddp_comm_hook`` input was specified, it will also register that hook + to the ``ddp_model``. The hook fed into this function should not change + the resulting gradients. + """ + group, group_id, rank = self._init_global_test() + world_size = get_world_size() + + # FIXME: Add testing for gloo/CUDA + if BACKEND == "mpi" or BACKEND == "gloo": + global_batch_size = world_size + local_batch_size = 1 + model, ddp_model, input, target = self._prepare_cpu_module( + group_id, global_batch_size, gradient_as_bucket_view + ) + + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + int_devices = rank_to_GPU[rank][:1] + devices = [torch.device("cuda:" + str(i)) for i in int_devices] + global_batch_size = world_size + local_batch_size = len(devices) + model, ddp_model, input, target = self._prepare_single_device_module( + rank, + group_id, + devices, + devices, + global_batch_size, + gradient_as_bucket_view, + ) + + if ddp_comm_hook is not None: + ddp_model.register_comm_hook(group_id, ddp_comm_hook) + + def step_model(model, input, target): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + loss.backward() + + # ensure accumulate grads works with no_grad => no grads are accumulated. 
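+ # (More generally, this helper exercises the usual no_sync()
+ # gradient-accumulation pattern. Illustrative sketch, not executed by
+ # this test; `micro_batches` and `opt` are hypothetical names:
+ #
+ #     with ddp_model.no_sync():
+ #         for mb in micro_batches[:-1]:
+ #             ddp_model(mb).sum().backward()        # grads accumulate locally
+ #     ddp_model(micro_batches[-1]).sum().backward() # this backward allreduces
+ #     opt.step()
+ #     opt.zero_grad()
+ # )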
+ with torch.no_grad(): + with ddp_model.no_sync(): + ddp_model.train() + ddp_model(input) + + # check two model parameters over num_iters iterations + for iteration in range(num_iters): + step_model(model, input, target) + + ddp_input = input[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + ddp_target = target[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + + if iteration % 2 == 0: + # accumulate grads locally + with ddp_model.no_sync(): + step_model(ddp_model, ddp_input, ddp_target) + else: + # sync grads + step_model(ddp_model, ddp_input, ddp_target) + + for i, j in zip(model.parameters(), ddp_model.parameters()): + if not i.requires_grad: + continue + if iteration % 2 == 0: + self.assertNotEqual(i.grad, j.grad) + else: + self.assertEqual(i.grad, j.grad) + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + iteration) + input = input[torch.randperm(global_batch_size)] + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_grad_is_view(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync + using allreduce hook and validates whether future result was properly + passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + tensors = [bucket.buffer() / world_size] + return ( + group_id.allreduce(tensors) + .get_future() + .then(lambda fut: fut.value()[0]) + ) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce + hook that also uses then callbacks. In first then callback result is multiplied + by 2, and the second callback divides the result by 2 * world_size. It validates + whether final result was properly passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_with_then_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + fut = group_id.allreduce([bucket.buffer()]).get_future() + + def mult(fut): + # Multiply the result by 2. + return 2 * fut.wait()[0] + + def div(fut): + # Divide the result by 2 * world_size. 
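+ # Net effect of the chained callbacks (a clarifying note): the
+ # allreduce produces sum_over_ranks(bucket), mult() doubles it, and
+ # this division by 2 * world_size restores the plain average, i.e.
+ # sum(bucket) * 2 / (2 * world_size) == sum(bucket) / world_size,
+ # so the hook should leave the resulting gradients unchanged.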
+ return fut.wait() / (2 * world_size) + + return fut.then(mult).then(div) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_with_then_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_get_future(self): + def mult(fut): + return [t * 3 for t in fut.wait()] + + def add(fut): + return [t + 1 for t in fut.wait()] + + group, group_id, rank = self._init_global_test() + input = _build_tensor(3, 2) + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + input = input.to(device_id) + fut = group_id.allreduce([input]).get_future() + res = fut.then(mult).then(add).wait() + expected = _build_tensor(3, 2 * len(group) * 3 + 1) + + self.assertEqual(res[0], expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + gpus = list(rank_to_GPU[rank]) + + for use_bucket_view, static_graph in itertools.product( + (False, True), (False, True) + ): + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test set static graph twice + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + set_static_graph_twice=True, + ) + + # test output_device + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test device_ids + gpus_list = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel( + gpu_subset=gpus_list, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + def _test_DistributedDataParallel_with_amp(self, grad_is_view=False): + torch.manual_seed(31415) + # Creates model and optimizer in default precision + model = copy.deepcopy(DDP_NET).cuda() + optimizer = torch.optim.SGD(model.parameters(), lr=0.03) + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + # verify grads are none before training + for p in ddp_model.parameters(): + self.assertTrue(p is not None) + self.assertTrue(p.grad is None) + + for idx in range(20): + optimizer.zero_grad() + # Runs the forward pass with autocasting. + with autocast(): + output = ddp_model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + # Backward passes under autocast are not recommended. + # Backward ops run in the same dtype autocast chose for corresponding forward ops. 
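+ # Side note (illustrative, not part of this test): if gradient clipping
+ # were needed here, the documented GradScaler pattern is to unscale
+ # before clipping, e.g.:
+ #     scaler.scale(loss).backward()
+ #     scaler.unscale_(optimizer)
+ #     torch.nn.utils.clip_grad_norm_(ddp_model.parameters(), max_norm=1.0)
+ #     scaler.step(optimizer)
+ #     scaler.update()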
+ scaler.scale(loss).backward() + + # verify grads are not none and are valid during training + for p in ddp_model.parameters(): + if p.requires_grad: + self.assertTrue(p.grad is not None) + self.assertFalse(p.grad.isnan().any()) + self.assertFalse(p.grad.isinf().any()) + + # scaler.step() first unscales the gradients of the optimizer's assigned params. + # If these gradients do not contain infs or NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + idx) + input = input[torch.randperm(dist.get_world_size() * 2)] + + return ddp_model + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_with_amp_and_grad_is_view(self): + torch.cuda.set_device(self.rank) + ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=False + ) + ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=True + ) + for i, j in zip( + ddp_model_grad_not_view.parameters(), + ddp_model_grad_is_view.parameters(), + ): + self.assertEqual(i, j) + + def _test_DistributedDataParallel_SyncBatchNorm( + self, + gpu_subset, + rank, + local_bs, + global_bs, + offset, + output_device=None, + affine=True, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = BN_NET if affine else BN_NET_NO_AFFINE + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, device_ids=gpu_subset + ) + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp, weights_only=False) + else: + torch.save(model_DDP, tmp.name) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp.name, weights_only=False) + + # data initialization + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + offset, + dist.get_world_size(), + 5 if affine else 2, + ) + self._barrier() + + def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view): + learning_rate = 0.03 + + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + averager = create_averager() + opt = torch.optim.SGD(net.parameters(), lr=learning_rate) + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + # Process group cannot be pickled in some environments, + # so cannot deep copy an averager. 
See: + # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496 + averager2 = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step(opt, net, loss_fn, input, target) + averager.average_parameters(net.parameters()) + + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + for p1, p2 in zip( + net.parameters(), net_using_post_localSGD_opt.parameters() + ): + self.assertEqual(p1.data, p2.data) + + # Also check if the built-in step counters are the same to prevent a bug like #74737. + self.assertEqual(averager.step, averager2.step) + + def _create_periodic_model_averager(self): + return averagers.PeriodicModelAverager(period=4, warmup_steps=10) + + def _create_post_localSGD_optimizer(self, net, learning_rate, averager): + return post_localSGD_optimizer.PostLocalSGDOptimizer( + optim=torch.optim.SGD(net.parameters(), lr=learning_rate), + averager=averager, + ) + + def _perform_a_train_step(self, optimizer, net, loss_fn, input, target): + optimizer.zero_grad() + output = net(input) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + + def _test_post_localSGD_optimizer_step_reload( + self, create_averager, chkpt_file + ): + learning_rate = 0.03 + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank] + ) + + averager = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager + ) + + averager2 = create_averager() + dummy_post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + + if self.rank == 0: + torch.save( + {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank} + checkpoint = torch.load(chkpt_file, map_location=map_location) + dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"]) + + # Check that we didn't hit the trivial case + self.assertNotEqual(averager2.step, 0) + # Check if dummy averager was initialized to a correct value + self.assertEqual(averager.step, averager2.step) + + # Remove 'step' entry from a checkpoint. + # And make sure it is not in the state dictionary + del checkpoint["optimizer_state_dict"]["step"] + self.assertNotIn("step", checkpoint["optimizer_state_dict"]) + + # Check if checkpoint without a 'step' entry invokes a warning + with self.assertWarnsRegex( + expected_warning=UserWarning, + expected_regex="Loaded state dict does not contain a step counter for an averager. 
" + "Setting step counter to 0.", + ): + dummy_post_localSGD_opt.load_state_dict( + checkpoint["optimizer_state_dict"] + ) + + self.assertEqual(averager2.step, 0) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_grad_is_view(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=True, + ) + + def _create_hierarchical_model_averager(self): + period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())]) + return hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=4 + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view( + self, + ): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=True, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_step_reload(self): + torch.cuda.set_device(self.rank) + with _rank_temp_file() as tmp_file: + self._test_post_localSGD_optimizer_step_reload( + self._create_periodic_model_averager, tmp_file + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self): + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last + ) + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last_3d + ) + + def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + self, memory_format + ): + group, group_id, rank = self._init_global_test() + num_processes = dist.get_world_size() + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(num_processes * 2) + + model = ONLY_SBN_NET + model_gpu = copy.deepcopy(model).cuda(rank) + model_DDP = nn.parallel.DistributedDataParallel( + model_gpu, device_ids=[rank] + ) + + shapes = [global_bs, 2, 4, 4] + ( + [] if memory_format is torch.channels_last else [4] + ) + + input_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + 
.to(memory_format=memory_format) + ) + target_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + .to(memory_format=memory_format) + ) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_gpu, + target_gpu, + loss, + local_bs, + rank, + global_bs, + True, + bs_offset, + dist.get_world_size(), + memory_format=memory_format, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + # test output_device + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + # test device_ids + gpus = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + affine=False, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = len(gpus) * 2 + global_bs = dist.get_world_size() * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. 
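+ # Note: torch.backends.cudnn.flags(False) below is a context manager whose
+ # first argument is `enabled`, so cuDNN is disabled only for the enclosed
+ # block; both the single-GPU and the DDP model then take the native
+ # batch_norm kernel path described above.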
+ with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + @require_world_size(2) + def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = 1 + global_bs = dist.get_world_size() + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. + with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value( + self, + ): + group, group_id, rank = self._init_global_test() + model = nn.parallel.DistributedDataParallel( + ONLY_SBN_NET.cuda(rank), device_ids=[rank] + ) + + input_var = [] + for i in range(dist.get_world_size()): + input_var_rank = torch.cat( + [ + torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)), + torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)), + ], + dim=1, + ) + input_var.append(input_var_rank) + + all_input_var = torch.cat( + [ + x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) + for x in input_var + ], + dim=1, + ).cuda(rank) + + for i in range(100): + y = model(input_var[rank].cuda(rank)) + y.mean().backward() + + running_mean, running_var = ( + model.module.running_mean, + model.module.running_var, + ) + torch.testing.assert_close(running_mean, all_input_var.mean(1)) + torch.testing.assert_close(running_var, all_input_var.var(1)) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self): + group, group_id, rank = self._init_global_test() + # only do single GPU per process + gpus = [rank] + + # cpu training setup + model = BN_NET + + num_processes = dist.get_world_size() + local_bs = rank + 2 + bs_offset = int((rank + 3) * rank / 2) + global_bs = int((num_processes + 3) * num_processes / 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND 
not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_half(self): + group, group_id, rank = self._init_global_test() + + model = copy.deepcopy(BN_NET) + model = model.half() + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = nn.parallel.DistributedDataParallel(model.cuda(rank), device_ids=[rank]) + inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank)) + # Check that forward/backward do not error with dtype mismatch + out = model(inp) + self.assertEqual(out.dtype, torch.float16) + out.sum().backward() + for param in model.parameters(): + self.assertEqual(param.grad.dtype, torch.float16) + + def _test_ddp_logging_data(self, is_gpu): + rank = dist.get_rank() + model_DDP = copy.deepcopy(DDP_NET) + if is_gpu: + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP.cuda(rank), device_ids=[rank] + ) + else: + model_DDP = nn.parallel.DistributedDataParallel(model_DDP) + + # dummy data initialization + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + if is_gpu: + input = input.cuda(rank) + target = target.cuda(rank) + + model_DDP._set_ddp_runtime_logging_sample_rate(2) + + for idx in range(20): + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + + self._model_step_with_zero_grad(model_DDP) + + # Verify DDP logging data is sampled as expected + # If it has ran more than 10 iterations and this is + # the sampled iteration for measuring run time stats, + # the run time stats for this idx-th iteration will not + # be zeros. + ddp_logging_data = model_DDP._get_ddp_logging_data() + if idx > 0 and (idx < 10 or idx % 2 == 0): + self.assertGreaterEqual( + ddp_logging_data.get("forward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertEqual(ddp_logging_data.get("iteration"), idx) + elif idx > 0: + # if the idx-th iteration is not sampled to set runtime stats, + # ddp_logging_data.iteration will not be updated to current + # iteration. 
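+ # Worked example of the sampling above: with a sample rate of 2 and 20
+ # iterations (idx 0..19), runtime stats are expected for the early
+ # iterations (idx < 10) and afterwards only for even idx, so the last
+ # recorded iteration is 18, which the CPU test below checks via
+ # ddp_logging_data.get("iteration").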
+ self.assertNotEqual(ddp_logging_data.get("iteration"), idx) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_ddp_logging_data_cpu(self): + def parse_env(var): + return os.environ[var] if var in os.environ else "N/A" + + dist.set_debug_level(dist.DebugLevel.INFO) + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=False) + + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size()) + self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank()) + self.assertEqual(ddp_logging_data.get("module_name"), "Net") + self.assertEqual(ddp_logging_data.get("device_ids"), "") + # output_device is -1 in default if it is not set, e.g. + # output_device of CPU training is -1. + self.assertEqual(ddp_logging_data.get("output_device"), -1) + self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1) + self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024) + self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0) + self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0) + self.assertEqual( + ddp_logging_data.get("backend_name"), dist.get_backend(group_id) + ) + self.assertEqual(ddp_logging_data.get("iteration"), 18) + params = list(model_DDP.parameters()) + num_params = 0 + param_size = 0 + params = list(filter(lambda parameter: parameter.requires_grad, params)) + for p in params: + num_params += 1 + param_size += p.numel() * p.element_size() + self.assertEqual(ddp_logging_data.get("dtypes"), "float") + self.assertEqual( + ddp_logging_data.get("total_parameter_size_bytes"), param_size + ) + self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params) + self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size)) + self.assertEqual( + ddp_logging_data.get("master_port"), parse_env("MASTER_PORT") + ) + self.assertEqual( + ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR") + ) + self.assertEqual( + ddp_logging_data.get("torch_distributed_debug"), + parse_env("TORCH_DISTRIBUTED_DEBUG"), + ) + self.assertEqual( + ddp_logging_data.get("cuda_visible_devices"), + parse_env("CUDA_VISIBLE_DEVICES"), + ) + if ddp_logging_data.get("backend_name") == "gloo": + self.assertEqual( + ddp_logging_data.get("gloo_socket_ifname"), + parse_env("GLOO_SOCKET_IFNAME"), + ) + self.assertEqual( + ddp_logging_data.get("gloo_device_transport"), + parse_env("GLOO_DEVICE_TRANSPORT"), + ) + default_gloo_threads = 2 + self.assertEqual( + ddp_logging_data.get("gloo_num_threads"), + default_gloo_threads, + ) + + self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None) + self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None) + self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None) + self.assertEqual(ddp_logging_data.get("nccl_debug"), None) + self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None) + self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None) + # test runtime logging fields + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. 
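+ # In other words, under DETAIL the logger may materialize
+ # "unused_parameter_size" with a default of 0 even though this model has
+ # no unused parameters, hence the .get("unused_parameter_size", 0) == 0
+ # check below.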
+ self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0) + self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1) + self.assertEqual( + ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size) + ) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), 1 + ) + self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + # test larger net with mixed data types, verify multiple bucket sizes + model = LargeNet() + model.float() + model.fc1.double() + model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5) + ddp_logging_data = model_DDP._get_ddp_logging_data() + params = list(model_DDP.parameters()) + self.assertEqual( + ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024) + ) + bucket_sizes = [ + params[1].numel() * params[1].element_size(), + params[0].numel() * params[0].element_size(), + ] + self.assertEqual( + ddp_logging_data.get("bucket_sizes"), + ", ".join(str(x) for x in bucket_sizes), + ) + self.assertEqual(ddp_logging_data.get("dtypes"), "double, float") + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_ddp_logging_data_gpu(self): + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=True) + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("device_ids"), str(rank)) + self.assertEqual(ddp_logging_data.get("output_device"), rank) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = 
ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # test runtime logging fields + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. + self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_static_graph_api_cpu(self): + model_DDP = nn.parallel.DistributedDataParallel(DDP_NET) + expected_err = "should be called before training loop starts" + with self.assertRaisesRegex(RuntimeError, expected_err): + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + offset = dist.get_rank() * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + model_DDP._set_static_graph() + + # Verify error was logged in ddp_logging_data. + verify_ddp_error_logged(model_DDP, expected_err) + + @skipIfNoTorchVision + def test_SyncBatchNorm_process_group(self): + # When adopting `convert_sync_batchnorm` to convert a `nn.modules`, + # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm` + # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models). + + process_ids = 0 + process_group = torch.distributed.new_group([process_ids]) + res50_model = torchvision.models.resnet50() + res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(res50_model), process_group + ) + process_group_sync = res50_model_sync.layer1[0].bn1.process_group + self.assertEqual(process_group_sync, process_group) + + def _run_reduction_test( + self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None + ): + if reduction_fn != dist.all_reduce and dst is None: + raise ValueError(f"Reduction fn {reduction_fn} must specify dst!") + if dst is not None: + reduction_fn(tensor, dst, op) + # Only destination rank tensor is expected to have final result. 
+ if dist.get_rank() == dst: + self.assertEqual(tensor, expected_tensor) + else: + reduction_fn(tensor, op) + self.assertEqual(tensor, expected_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allreduce(self): + torch.cuda.set_device(self.rank) + # Run all_reduce with PRODUCT + element = self.rank % 2 == 0 + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([False, False]).to(self.rank), op + ) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. + input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test(input_tensor, expected_tensor, op) + + # Run all_reduce with SUM + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([True, True]).to(self.rank), op + ) + # TODO: NCCL backend does not work correctly for bitwise reduction ops + # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for + # these once it is supported. + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allgather(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, True]} + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + # Preserve a copy of the tensor to compare against after allgather. + input_tensor_copy = input_tensor.clone() + tensor_list = [ + torch.tensor([False, False]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, input_tensor) + + self.assertEqual(len(tensor_list), dist.get_world_size()) + for i, t in enumerate(tensor_list): + expected = torch.tensor(inp[i % 2]).to(self.rank) + self.assertEqual(t, expected) + # Ensure that the input tensor is not modified, since this collective + # does not modify its input. + self.assertEqual(input_tensor_copy, input_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_nccl_backend_bool_reduce(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, False]} + # Run reduce() with product op + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = torch.tensor([False, False]).to(self.rank) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. 
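+ # These bool reductions operate on the underlying 0/1 values, so for the
+ # ops used in this test:
+ #     PRODUCT / MIN -> False as soon as any rank contributes False
+ #     SUM / MAX     -> True  as soon as any rank contributes True
+ # hence with every rank contributing True the result must stay True.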
+ input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test( + input_tensor, expected_tensor, op, dist.reduce, dst=0 + ) + + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = ( + torch.tensor([True, True]).to(self.rank) + if self.rank == 0 + else input_tensor.clone() + ) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_broadcast(self): + tensor_size = 10 + bcast_tensor = torch.tensor( + [ + (random.random() < 0.5 if self.rank == 0 else False) + for _ in range(tensor_size) + ] + ).to(self.rank) + dist.broadcast(bcast_tensor, src=0) + # Now allgather and ensure the tensors are equal. + tensor_list = [ + torch.tensor([False for _ in range(tensor_size)]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, bcast_tensor) + expected = tensor_list[0] + for tensor in tensor_list[1:]: + self.assertEqual(tensor, expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedSampler_padding(self): + # Tests padding of distributed sampler. + world_size = dist.get_world_size() + + # Simulates the 'casual' dataset size + dataset_size = 100 + world_size + 1 + dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)] + + # Simulates the 'tiny' dataset size + dataset_tiny_size = max(world_size // 2 - 1, 1) + dataset_tiny = [ + torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size) + ] + + # Specifying drop_last=True will cause the tail of the data to be dropped. + dist_sampler = DistributedSampler(dataset=dataset, drop_last=True) + local_num_samples, local_dataset_size = ( + dist_sampler.num_samples, + dist_sampler.total_size, + ) + # The effective dataset size should be the greatest integer that is <= + # dataset_size that is divisible by the world_size. This is to ensure each + # rank processes the same number of samples. + effective_dataset_size = ( + math.ceil((dataset_size - world_size) / world_size) + if dataset_size % world_size != 0 + else dataset_size / world_size + ) + self.assertEqual(local_num_samples, effective_dataset_size) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler)) + self.assertEqual(len(indices_list), local_num_samples) + + def validate_global_samples(local_num_samples): + # Ensure that each rank processes the same number of samples. + world_samples = [ + torch.LongTensor([0]).to(self.rank) for _ in range(world_size) + ] + dist.all_gather( + world_samples, torch.tensor([local_num_samples]).to(self.rank) + ) + world_samples = [sample.item() for sample in world_samples] + self.assertEqual(len(set(world_samples)), 1) + + validate_global_samples(local_num_samples) + + # drop_last=False is the default and will add additional indices to be sampled, + # increasing the effective dataset size. + dist_sampler_added_samples = DistributedSampler(dataset=dataset) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples.num_samples, + dist_sampler_added_samples.total_size, + ) + # The effective dataset size is the smallest integer that is >= dataset_size + # and divisible by the world size. 
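+ # Worked example with the values used in this test and world_size == 2:
+ # the "casual" dataset has 100 + 2 + 1 == 103 items; drop_last=True trims
+ # it to 102 (51 samples per rank), while the default drop_last=False pads
+ # it up to 104, i.e. ceil(103 / 2) == 52 samples per rank with one index
+ # repeated.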
+ self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size)) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples)) + self.assertEqual(len(indices_list), local_num_samples) + + # Ensure that each rank processes the same number of samples. + validate_global_samples(local_num_samples) + + # Ensure additional samples are padded even when + # the extremely small dataset is given. + dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples_tiny.num_samples, + dist_sampler_added_samples_tiny.total_size, + ) + self.assertEqual( + local_num_samples, math.ceil(dataset_tiny_size / world_size) + ) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples_tiny)) + self.assertEqual(len(indices_list), local_num_samples) + validate_global_samples(local_num_samples) + + def _test_allgather_object(self, subgroup=None): + # Only set device for NCCL backend since it must use GPUs. + + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + output_gathered = [None for _ in range(dist.get_world_size())] + dist.all_gather_object( + output_gathered, + gather_objects[self.rank % len(gather_objects)], + group=subgroup, + ) + + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + def test_all_gather_object_default_pg(self): + return self._test_allgather_object() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_all_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_allgather_object(subgroup=subgroup) + + def _test_gather_object(self, pg=None): + # Ensure stateful objects can be gathered + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + my_rank = dist.get_rank(pg) + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. 
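+ # Minimal usage sketch of the object collective exercised below
+ # (illustrative only, assuming an initialized process group):
+ #     objs = [None] * dist.get_world_size()
+ #     dist.all_gather_object(objs, {"rank": dist.get_rank()})
+ #     # afterwards objs[i] == {"rank": i} on every rank
+ # The device shuffle that follows deliberately keeps rank != current CUDA
+ # device, exercising code paths that must not assume the two coincide.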
+ next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=my_rank))) + + output_gathered = [None for _ in range(dist.get_world_size(pg))] + gather_on_rank = 0 + dist.gather_object( + gather_objects[self.rank % len(gather_objects)], + object_gather_list=output_gathered + if my_rank == gather_on_rank + else None, + dst=gather_on_rank, + group=pg, + ) + if my_rank != gather_on_rank: + self.assertEqual( + output_gathered, [None for _ in range(dist.get_world_size())] + ) + else: + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + # Validate errors when objects can't be pickled. + class Bar: + pass + + b = Bar() + gather_objects = [b for _ in range(dist.get_world_size())] + with self.assertRaisesRegex(AttributeError, "Can't pickle local object"): + dist.all_gather_object( + [None for _ in range(dist.get_world_size())], + gather_objects[self.rank], + group=pg, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object(self): + return self._test_gather_object() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_gather_object(subgroup) + + def validate_net_equivalence(self, net): + # Helper to validate synchronization of nets across ranks. + net_module_states = list(net.module.state_dict().values()) + # Check that all tensors in module's state_dict() are equal. + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for tensor in tensor_list: + self.assertEqual(tensor, t) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sync_module_states(self): + # Test that after calling _sync_module_states, models across ranks + # are the same and are equal to the model on the input rank. + dim = 2 + rank = self.rank + rank_to_broadcast = 1 + # Seed to ensure that ranks are initialized with different initial models. + torch.manual_seed(rank) + model = nn.Linear(dim, dim, bias=False) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + new_model = nn.Linear(dim, dim, bias=False).cuda(rank) + net.module = copy.deepcopy(new_model) + # Assert params are different + net_module_states = list(net.module.state_dict().values()) + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for i, tensor in enumerate(tensor_list): + if i == rank: + self.assertEqual(t, tensor) + else: + # tensor from another rank should be different. 
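+ # (Why this holds: each rank seeded its RNG with torch.manual_seed(rank)
+ # and swapped a freshly initialized nn.Linear into net.module after DDP
+ # construction, so before the explicit _sync_module_states() call below
+ # the weights necessarily differ across ranks; the broadcast from
+ # rank_to_broadcast is what makes them equal again.)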
+ self.assertNotEqual(t, tensor) + + _sync_module_states( + module=net.module, + process_group=net.process_group, + broadcast_bucket_size=net.broadcast_bucket_size, + src=rank_to_broadcast, + params_and_buffers_to_ignore=net.parameters_to_ignore, + ) + # Now all model params should be the same. + self.validate_net_equivalence(net) + # Since the network params were broadcast from rank_to_broadcast, validate that + # they are the same as new_model on rank_to_broadcast. + if rank == rank_to_broadcast: + expected_states = new_model.state_dict().values() + for t, expected in zip(net_module_states, expected_states): + self.assertEqual(t, expected) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_grad_div_uneven_inputs(self): + # Test gradient division during training with join() API. If + # divide_by_initial_world_size=False, we scale by the effective world + # size when allreducing grads. + dim = 5 + batch = 1 + grad_scale = 50 + rank = self.rank + model = nn.Linear(dim, dim, bias=False) + inp = torch.ones(batch, dim, device=self.rank) * grad_scale + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + n_iters = 3 + if self.rank > 0: + n_iters += 2 + + with net.join(divide_by_initial_world_size=False): + for _ in range(n_iters): + loss = net(inp).sum() + loss.backward() + # The grad is always expected_grad, since we divide by the number + # of currently active processes and inactive processes contribute + # zero gradient. If we kept dividing by static initial world + # size as processes leave, the grad would be smaller. + expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grads so that it's the same every iteration + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + # If divide_by_initial_world_size=True (default), we always scale grads + # by the initial world_size. + with net.join(divide_by_initial_world_size=True): + for i in range(n_iters): + loss = net(inp).sum() + loss.backward() + effective_ws = dist.get_world_size() + if i >= 3: + effective_ws -= 1 + expected_grad = ( + torch.ones(dim, dim, device=self.rank) + * grad_scale + * effective_ws + ) / dist.get_world_size() + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grad so that it's the same every iteration. + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + def _test_ddp_profiling(self, profiler_ctx, profiler_ctx2=None): + """Runs DDP based model training and captures profiles. + This test will do two profiler runs. + 1. An inital basic run to check if profiler events are correctly captured. + 2. A second profiling pass after running some iterations of DDP, to check robustness of thread local state. + + args + profiler_ctx : Profiler context manager for pass 1 + profiler_ctx2 : Profiler context manager for pass 2. + This can be left out as None, in which case a deepcopy + of profiler_ctx is used. + Returns: + prof: Instantiated profiler object that can be used for post analysis. 
+ """ + batch = 3 + dim = 10 + num_iters = 6 + torch.cuda.set_device(self.rank) + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + if profiler_ctx2 is None: + profiler_ctx2 = copy.deepcopy(profiler_ctx) + + with profiler_ctx as prof: + for i in range(num_iters): + loss = net(inp).sum() + loss.backward() + + all_reduce_event_name = f"{dist.get_backend()}:all_reduce" + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in events) + self.assertEqual(event_count, num_iters) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, all_reduce_event_name) + + broadcast_event_name = f"{dist.get_backend()}:broadcast" + broadcast_events = get_profiling_event(broadcast_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in broadcast_events) + # Broadcast is called during rebuild_buckets + self.assertGreaterEqual(event_count, 1) + for event in broadcast_events: + self.assertEqual(event.name, broadcast_event_name) + + # Run DDP with profiling for a few iterations, then enable profiling + # for a single pass, and ensure it is recorded. This tests that the + # thread local state is correctly updated. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + for i in range(3): + loss = net(inp).sum() + loss.backward() + # Now enable the profiler. + with profiler_ctx2 as prof: + loss = net(inp).sum() + loss.backward() + + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + self.assertGreaterEqual(len(events), 1) + self.assertGreaterEqual(events[0].count, 1) + self.assertEqual(events[0].name, all_reduce_event_name) + for event in events: + self.assertTrue(event.is_async) + # Ensure searching unused parameters was profiled + events = get_profiling_event("search_unused_parameters", prof) + self.assertEqual(len(events), 1) + + return prof + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle("Currently failing in NVIDIA internal CI") + def test_ddp_profiling_autograd_profiler(self): + autograd_profiler_ctx = torch.autograd.profiler.profile() + return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_ddp_profiling_torch_profiler(self): + cpu_act = torch.profiler.ProfilerActivity.CPU + cuda_act = torch.profiler.ProfilerActivity.CUDA + torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act]) + prof = self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx) + + if dist.get_backend() != "nccl": + return + + # Note comment out the "os.remove(trace_file)" in `get_profiler_nccl_meta()` + # to debug any mismatches. 
+ nccl_meta_events = get_profiler_nccl_meta(prof) + self.assertGreater(len(nccl_meta_events), 0) + + nccl_meta = self._sanity_check_profiler_nccl_meta(nccl_meta_events) + + # additionally check the specific collectives in this test case + self.assertEqual(len(nccl_meta["allreduce"]), 2) + self.assertEqual(len(nccl_meta["wait"]), 1) + + # check allreduce message sizes + a0 = nccl_meta["allreduce"][0] + self.assertEqual(a0["Out msg nelems"], 100, msg=f"{a0}") + self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") + a1 = nccl_meta["allreduce"][1] + self.assertEqual(a1["Out msg nelems"], 1, msg=f"{a1}") + self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") + + def _validate_execution_trace_nccl(self, et_file: str) -> None: + """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" + We test for basic fields in theese nodes in the Execution Trace. + """ + with open(et_file) as f: + et = json.load(f) + pg_cfg_node = [n for n in et["nodes"] if n["name"] == "## process_group:init ##"] + self.assertGreaterEqual(len(pg_cfg_node), 1) + nccl_meta_nodes = [n for n in et["nodes"] if n["name"] == "record_param_comms"] + self.assertEqual(len(nccl_meta_nodes), 3) + per_coll_meta = defaultdict(list) + + # Sanity check NCCL metadata nodes + for n in nccl_meta_nodes: + attrs_list = n.get("attrs", []) + self.assertGreater(len(attrs_list), 0) + attrs = {a["name"]: a["value"] for a in attrs_list} + + collname = attrs.get("collective_name", "") + self.assertNotEqual(collname, "") + self.assertNotEqual(attrs.get("dtype", ""), "") + + per_coll_meta[collname].append(attrs) + if collname in {"wait"}: + continue + + self.assertEqual(attrs["pg_name"], "0") # yes this is a string + self.assertEqual(attrs["pg_desc"], "default_pg") + self.assertEqual(attrs["pg_size"], 2) + + self.assertGreaterEqual(attrs.get("in_msg_nelems", -1), 0) + self.assertGreaterEqual(attrs.get("out_msg_nelems", -1), 0) + self.assertTrue("in_split_size" in attrs.keys()) + self.assertTrue("out_split_size" in attrs.keys()) + self.assertEqual(attrs.get("global_rank_start", -1), 0) + self.assertEqual(attrs.get("global_rank_stride", -1), 1) + + # print(per_coll_meta) + self.assertEqual(len(per_coll_meta["allreduce"]), 2) + self.assertEqual(len(per_coll_meta["wait"]), 1) + + # check allreduce message sizes + a0 = per_coll_meta["allreduce"][0] + self.assertEqual(a0["out_msg_nelems"], 100, msg=f"{a0}") + self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") + a1 = per_coll_meta["allreduce"][1] + self.assertEqual(a1["out_msg_nelems"], 1, msg=f"{a1}") + self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") + + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + @unittest.skipIf(BACKEND != "nccl", "Tests nccl metadata primarily.") + def test_ddp_profiling_execution_trace(self): + self.assertEqual(dist.get_backend(), "nccl") + # Create a temp file to save execution trace data + fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) + fp.close() + et_file = fp.name + et = ExecutionTraceObserver().register_callback(et_file) + + # first profiler context need not have ET + torch_profiler_ctx1 = torch.profiler.profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + ) + # collect ET in second profiler pass + torch_profiler_ctx2 
= torch.profiler.profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + execution_trace_observer=et + ) + prof = self._test_ddp_profiling( + profiler_ctx=torch_profiler_ctx1, + profiler_ctx2=torch_profiler_ctx2, + ) + + print(f"Execution trace saved at {fp.name}") + self._validate_execution_trace_nccl(et_file) + + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_join_model_equivalence(self): + # Verifies equivalence with model training locally and with DDP under + # the join context manager. + batch = 3 + dim = 10 + learning_rate = 0.03 + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + local_model = copy.deepcopy(model) + local_model = local_model.cuda(self.rank) + rank_to_iter_mapping = { + rank: 2 * (rank + 1) for rank in range(dist.get_world_size()) + } + # run local model + local_iters = sum(rank_to_iter_mapping.values()) + local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate) + for _ in range(local_iters): + local_optim.zero_grad() + out = local_model(inp) + loss = out.sum() + loss.backward() + local_optim.step() + + # run DDP model with join API + num_iters = rank_to_iter_mapping[self.rank] + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + ddp_optim = torch.optim.SGD( + model.parameters(), lr=learning_rate * dist.get_world_size() + ) + with net.join(): + for i in range(num_iters): + ddp_optim.zero_grad() + out = net(inp) + loss = out.sum() + loss.backward() + torch.cuda.synchronize(device=self.rank) + ddp_optim.step() + + # Validate model state dicts are equal + for (_, local_tensor), (_, dist_tensor) in zip( + local_model.state_dict().items(), net.module.state_dict().items() + ): + self.assertEqual(local_tensor, dist_tensor) + + def _run_uneven_inputs_test( + self, + test_case, + iteration_mapping, + find_unused_params, + ): + model = test_case.model + inp = test_case.inp + rank = self.rank + sync_interval = test_case.sync_interval + torch.cuda.set_device(rank) + # Ensure all outstanding GPU work is completed so this test runs independently. + dist.barrier() + # Bucket_cap_mb is intentionally low to test allreduce scheduling when + # there are many buckets. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), + device_ids=[rank], + bucket_cap_mb=1, + find_unused_parameters=find_unused_params, + ) + # Register hook if specified + if test_case.hook is not None: + net.register_comm_hook(test_case.state, test_case.hook) + print(f"registered hook {test_case.hook}") + + # Determine num iters for this rank via the passed in mapping. + num_iters = iteration_mapping[rank] + # If we throw when earliest rank terminates, we should ensure + # that we iterate for that minimum number of times. 
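+        # The block below makes every rank agree on the smallest per-rank
+        # iteration count by all-reducing num_iters with ReduceOp.MIN, so each
+        # rank knows when the earliest rank will exhaust its inputs.
+        # Illustrative sketch of the pattern (not executed here; `my_count` is
+        # a hypothetical per-rank value):
+        #   t = torch.tensor([my_count], device=torch.cuda.current_device())
+        #   dist.all_reduce(t, op=dist.ReduceOp.MIN)
+        #   min_count = t.item()  # identical on every rank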
+ num_iters_tensor = torch.tensor( + [num_iters], device=torch.cuda.current_device() + ) + dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN) + min_num_iters = num_iters_tensor.item() + total_iters = 0 + if test_case.throw_on_early_termination: + if min_num_iters == num_iters: + # Early termination rank(s) + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + else: + exception_ctx = nullcontext() + with exception_ctx: + with net.join( + throw_on_early_termination=test_case.throw_on_early_termination + ): + for i in range(num_iters): + # Use model.no_sync() to disable grad synchronization every + # sync_interval. + if i % sync_interval != 0: + context = net.no_sync() + else: + context = nullcontext() + with context: + if isinstance(inp, tuple): + loss = net(*inp).sum() + else: + loss = net(inp).sum() + loss.backward() + self._model_step(net) + # Ensure completion of GPU kernels (including allreduce). If the + # join API is not properly implemented, then this should hang + # since the allreduce will hang. + torch.cuda.synchronize(device=rank) + total_iters += 1 + if test_case.throw_on_early_termination: + # Ensure we iterated min_num_iters times. + self.assertEqual(total_iters, min_num_iters) + else: + # Ensure we iterated at least min_num_iters times. + self.assertGreaterEqual(total_iters, min_num_iters) + + # Ensure completion of all GPU kernels. + torch.cuda.synchronize(device=rank) + # When throwing on early rank termination, we do not + # broadcast model state from an authoritative rank. All models + # should already be in sync. + if not test_case.throw_on_early_termination: + self.assertTrue(net._authoritative_rank) + # All ranks should have agreed on the same authoritative_rank! + final_rank_tensor = torch.tensor( + [net._authoritative_rank], device=self.rank + ) + tensor_list = [ + torch.zeros_like(final_rank_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, final_rank_tensor) + max_rank = dist.get_world_size() - 1 + self.assertSetEqual( + {max_rank}, {tensor.item() for tensor in tensor_list} + ) + # Ensure that all models are the same across ranks after all have joined. + self.validate_net_equivalence(net) + # Ensure that running with DDP uneven inputs was logged. + ddp_logging_data = net._get_ddp_logging_data() + self.assertTrue(ddp_logging_data.get("join_uneven_inputs")) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs_stop_iteration_sync_bn(self): + # Tests that uneven inputs join handler correctly throws StopIteration + # for models with SyncBN or general collective comm when + # throw_on_early_termination=True. 
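+        # With throw_on_early_termination=True, the rank that exhausts its
+        # inputs raises a RuntimeError, and the remaining ranks raise as well
+        # instead of blocking forever in a collective. Illustrative usage
+        # sketch (assumes a `ddp` model and an iterable `loader`; not executed
+        # here):
+        #   with ddp.join(throw_on_early_termination=True):
+        #       for batch in loader:
+        #           ddp(batch).sum().backward()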
+ class ModelWithComm(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(2, 40, bias=False) + + def forward(self, x): + x = self.lin(x) + dist.all_reduce(x) + return x + + torch.cuda.set_device(self.rank) + model_bn = BN_NET + model_bn = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(model_bn) + ).cuda(self.rank) + comm_model = ModelWithComm().cuda(self.rank) + model_input = torch.randn(10, 2).cuda(torch.cuda.current_device()) + + for model in [model_bn, comm_model]: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + min_num_iters = 5 + if self.rank != 0: + # Early termination rank(s) + num_iters = min_num_iters + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + num_iters = min_num_iters * 2 + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + n = 0 + with exception_ctx: + with model.join(throw_on_early_termination=True): + for i in range(num_iters): + loss = model(model_input).sum() + loss.backward() + self._model_step(model) + n += 1 + + self.assertEqual(n, min_num_iters) + # Verify model equivalence + self.validate_net_equivalence(model) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs(self): + dim = 1000 + batch = 1 + # Create a variety of models to run uneven input tests on. + large_model = nn.Sequential( + nn.Conv2d(1, 20, 5), + nn.ReLU(), + nn.Conv2d(20, 32, 5), + nn.ReLU(), + nn.Conv2d(32, 256, 5), + nn.ReLU(), + ) + small_model = nn.Linear(dim, dim, bias=False) + bn_net = BatchNormNet() + + class UnusedParamModule(nn.Module): + def __init__(self, unused_params_rank): + super().__init__() + self.t0 = Task() + self.t1 = Task() + self.unused_params_rank = unused_params_rank + + def task_parameters(self): + return (self.t0.p, self.t1.p) + + def forward(self, x, rank): + return ( + self.t1(self.t0(x)) + if rank != self.unused_params_rank + else self.t1(x) + ) + + unjoined_rank_with_unused_params_model = UnusedParamModule(1) + joined_rank_with_unused_params_model = UnusedParamModule(0) + + rank = self.rank + models_to_test = [ + # Network with batchnorm + DDPUnevenTestInput( + name="batch_norm_net", + model=bn_net, + inp=torch.ones(batch, 2, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="large_conv_model", + model=large_model, + inp=torch.ones(batch, batch, dim, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model", + model=small_model, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + # Unused parameter test where rank that does not join early has unused params + DDPUnevenTestInput( + name="unjoined_rank_with_unused_params_model", + model=unjoined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + # Unused parameter test where rank that does join early has unused params + DDPUnevenTestInput( + name="joined_rank_with_unused_params_model", + model=joined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + ] + + # Test models that have hook installed. 
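+        # A DDP communication hook has the signature hook(state, bucket) and
+        # returns a torch.futures.Future holding the reduced gradient tensor;
+        # it is attached with net.register_comm_hook(state, hook).
+        # Illustrative sketch of a hook equivalent to a plain allreduce (not
+        # executed here):
+        #   def example_allreduce_hook(state, bucket):
+        #       tensor = bucket.buffer() / dist.get_world_size()
+        #       fut = dist.all_reduce(tensor, async_op=True).get_future()
+        #       return fut.then(lambda f: f.value()[0])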
+ models_with_hook = [ + DDPUnevenTestInput( + name="small_model_allreduce_hook", + model=small_model, + hook=default.allreduce_hook, + state=None, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model_power_sgd_hook", + model=small_model, + hook=powerSGD.powerSGD_hook, + state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + # Config so that powerSGD runs immediately instead of + # allreduce. + start_powerSGD_iter=1, + warm_start=False, + use_error_feedback=False, + ), + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + ] + models_to_test.extend(models_with_hook) + + # Add resnet model if we have torchvision installed. + if HAS_TORCHVISION: + resnet_model = torchvision.models.resnet50() + models_to_test.append( + DDPUnevenTestInput( + name="resnet_model", + model=resnet_model, + inp=torch.ones(1, 3, 1000, 1000), + sync_interval=1, + ) + ) + + # Test with no_sync every 2, 3, 4, ... iterations. + models_with_sync = [] + for i, test_input in enumerate(models_to_test): + models_with_sync.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=i + 2, + ) + ) + + throw_on_early_term_tests = [] + for test_input in models_to_test: + throw_on_early_term_tests.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=test_input.sync_interval, + throw_on_early_termination=True, + ) + ) + + models_to_test.extend(models_with_sync) + models_to_test.extend(throw_on_early_term_tests) + + # 0 iteration tests for when one process does not train model at all, so + # we must shadow the broadcast calls made when rebuilding buckets. + baseline_num_iters = [0, 5] + iteration_offsets = [2, 3, 10] + num_uneven_ranks = [1] + if dist.get_world_size() > 2: + num_uneven_ranks.append(2) + iteration_mappings = [] + # Generate rank : num_iters mappings for various uneven input scenarios. + # This includes cases where rank 0 joins early and all other ranks join + # later, and scenarios where multiple ranks join early, but at different + # iterations, and later ranks join later. + for num_early_join_ranks in num_uneven_ranks: + for baseline_iter in baseline_num_iters: + for offset in iteration_offsets: + mapping = dict.fromkeys(range(0, num_early_join_ranks), baseline_iter) + # if num_early_join_ranks > 1, ranks > 0 that will join early + # iterate offset//2 more times than rank 0, to test nodes + # depleting inputs at different times. 
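+                    # For example (illustrative): with world_size=4,
+                    # num_early_join_ranks=2, baseline_iter=5 and offset=10,
+                    # the resulting mapping is {0: 5, 1: 10, 2: 15, 3: 15},
+                    # i.e. rank 0 joins first, rank 1 a little later, and
+                    # ranks 2-3 train for the full 15 iterations.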
+ if num_early_join_ranks > 1: + for rank in mapping.keys(): + if rank > 0: + mapping[rank] += offset // 2 + mapping.update( + dict.fromkeys(range(num_early_join_ranks, dist.get_world_size()), baseline_iter + offset) + ) + iteration_mappings.append(mapping) + + for (test_case, iteration_mapping) in itertools.product( + models_to_test, iteration_mappings + ): + if self.rank == 0: + print( + f"""Running test: {test_case.name} sync interval + {test_case.sync_interval} with iteration mapping + {iteration_mapping}""" + ) + self._run_uneven_inputs_test( + test_case, + iteration_mapping, + find_unused_params=("unused_params_model" in test_case.name), + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_join_disable(self): + # tests that if net.join() with enable=False is specified, DDP works as + # expected with even inputs. + torch.manual_seed(self.rank) + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) * self.rank + n_iters = 5 + world_size = dist.get_world_size() + with net.join(enable=False): + for _ in range(n_iters): + # Clear grads + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + out = net(inp) + loss = out.sum() + loss.backward() + # Validate gradients to ensure that we divide by the correct + # world_size when join mode is disabled. + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual(net.module.weight.grad.item(), expected_grad) + + join_config = net._join_config + self.assertFalse(join_config.enable) + self.validate_net_equivalence(net) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_exception(self): + # Tests that exceptions during training are correctly propagated by the + # context manager. + error_str = "Intentional error" + + class ExceptionModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.param = nn.Parameter(torch.ones(1, requires_grad=True)) + + def forward(self, _): + raise ValueError(error_str) + + exception_module = ExceptionModule() + net = torch.nn.parallel.DistributedDataParallel( + exception_module.cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) + with self.assertRaisesRegex(ValueError, error_str): + with net.join(): + out = net(inp) + loss = out.sum() + loss.backward() + + def _test_broadcast_object_list(self, group=None): + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + # Only set device for NCCL backend since it must use GPUs. + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + backend = os.environ["BACKEND"] + if backend == "nccl": + torch.cuda.set_device(next_rank) + + src_rank = 0 + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + if IS_FBCODE: + # Create Tensor with > 2^31 Bytes storage requirements + # Only on FBCODE as testing OOMs in OSS + gather_objects.append(Foo(torch.randn(3, 178956971))) + objects = ( + gather_objects + if self.rank == src_rank + else [None for _ in gather_objects] + ) + + # Single object test with device specified. 
Backend="gloo", device=cpu + if backend != "nccl": + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device("cpu") + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. Backend="gloo", device=current_device+1 + # The test is gated by the fact GPU count is the same as world size to avoid the case + # when backend is gloo but there is no multiple GPU devices. + if backend != "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. Backend="nccl", device=current_device+1 + if backend == "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test: backward compatibility with device unspecified + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list(single_obj_list, src=0, group=group) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Multiple input objects test + if self.rank != src_rank: + self.assertNotEqual(objects, gather_objects) + dist.broadcast_object_list(objects, src=0, group=group) + self.assertEqual(objects, gather_objects) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_broadcast_object_list(self): + return self._test_broadcast_object_list() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + def _test_broadcast_object_list_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_broadcast_object_list(subgroup) + + def _test_ddp_ignore_params_arg(self, static_graph=False): + class TestModel(nn.Module): + def __init__(self, rank): + self.rank = rank + super().__init__() + self.fc1 = nn.Linear(1, 1, bias=False) + # Proxy that will be materialized to another architecture later. + # (after wrapping model with DDP) + if self.rank == 0: + self.fc2 = nn.Linear(1, 10, bias=False) + else: + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + device_id = self.rank + # Ensure the test works for both find_unused_parameter and broadcast_buffer settings. 
+ for (find_unused, broadcast_buffers) in itertools.product( + [False, True], [False, True] + ): + model = TestModel(self.rank).float().to(device_id) + # Note that the model can have different shape buffers if we pass + # them in to be ignored as well. + model.fc2.register_buffer( + "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank) + ) + proxy_params = list(model.fc2.parameters()) + proxy_buffers = list(model.fc2.buffers()) + model_fc2_name = next( + module_name + for module_name, module in model.named_modules() + if module is model.fc2 + ) + proxy_param_names = [ + f"{model_fc2_name}.{param_name}" + for param_name, _ in model.fc2.named_parameters() + ] + proxy_buffer_names = [ + f"{model_fc2_name}.{buf_name}" + for buf_name, _ in model.fc2.named_buffers() + ] + # Specify that we should ignore proxy_params since it will be + # materialized later. + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, proxy_param_names + proxy_buffer_names + ) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[device_id], + find_unused_parameters=find_unused, + broadcast_buffers=broadcast_buffers, + static_graph=static_graph, + ) + # Materialize new params. These are not registered in DDP and thus + # don't have autograd hooks installed on them. + ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id) + + # local model with the new materialized parameters. + local_model = copy.deepcopy(ddp.module).cuda(self.rank) + + inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1) + for i in range(6): + ddp(inp).sum().backward() + + local_model(inp).sum().backward() + # materialized param grad is not touched by DDP, so its grad should + # be the same as if running locally. + for materialized_param, local_param in zip( + ddp.module.fc2.parameters(), local_model.fc2.parameters() + ): + self.assertEqual(materialized_param.grad, local_param.grad) + + # fc1 parameter grad should still be different, due to allreduce. + for synced_param, local_param in zip( + ddp.module.fc1.parameters(), local_model.fc1.parameters() + ): + self.assertFalse(synced_param.grad == local_param.grad) + + # Proxy module grad should not be touched + for proxy_param in proxy_params: + self.assertTrue(proxy_param.grad is None) + + # Synchronize since we run multiple iterations of this test, to + # isolate failure hangs. + torch.cuda.synchronize(device=self.rank) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_ignore_params_arg(self): + self._test_ddp_ignore_params_arg(static_graph=False) + self._test_ddp_ignore_params_arg(static_graph=True) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_unused_params_rebuild_buckets_exception(self): + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.net1(x) + + ddp = torch.nn.parallel.DistributedDataParallel( + ToyModel().cuda(self.rank), device_ids=[self.rank] + ) + for i in range(2): + inp = torch.rand(1, 10) + if i > 0: + # On 2nd iteration, this will fail during rebuild_buckets, + # but we should report an error regarding unused parameters + # since that is the underlying root cause. 
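+                # DDP rebuilds its gradient buckets after the first iteration
+                # based on the order in which gradients actually arrived. Since
+                # net2 never receives a gradient, the reducer notices at the
+                # next iteration that the previous reduction never finished and
+                # raises, recommending find_unused_parameters=True as the fix.
+                # Illustrative sketch of that fix (not executed here):
+                #   ddp = torch.nn.parallel.DistributedDataParallel(
+                #       ToyModel().cuda(self.rank),
+                #       device_ids=[self.rank],
+                #       find_unused_parameters=True,
+                #   )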
+ try: + ddp(inp).sum().backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(ddp, msg) + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["net2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse( + True, "DDP unused parameters error not raised." + ) + else: + ddp(inp).sum().backward() + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_shared_grad_acc_unused_params(self): + # When find_unused_parameters=True, ensure we mark unused parameters + # even if they share gradient accumulators. + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + # net1, bias, and net1.bias are all unused params. + self.net1 = nn.Linear(10, 5, bias=False) + self.bias = nn.Parameter(torch.zeros(5)) + # net1.bias and self.bias are names for the same underlying + # parameter, so they share the same grad acc. This caused + # the bug reported in https://github.com/pytorch/pytorch/issues/41324. + self.net1.bias = self.bias + self.net2 = nn.Linear(10, 5) + + def forward(self, x): + return self.net2(x).sum() + + torch.cuda.set_device(self.rank) + model = ToyModel().to(torch.cuda.current_device()) + for static in [True, False]: + ddp_model = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + find_unused_parameters=True, + static_graph=static, + ) + inp = torch.randn(20, 10, device=self.rank) + for i in range(6): + loss = ddp_model(inp) + # To test https://github.com/pytorch/pytorch/issues/61982 + loss /= 10 + loss.backward() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device(self): + m = nn.Linear(10, 10).to(self.rank) + expected_len = 2 + + class TensorWrapper: + __slots__ = ["t", "moved_to_gpu"] + + def __init__(self, t): + self.t = t + self.moved_to_gpu = False + + # Handlers for specific types of validation we want to do based on + # the input type. 
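+        # DDP moves CPU tensors nested in tuples, lists, dicts and namedtuples
+        # to the module's device before forward (mirroring scatter), while
+        # instances of custom classes such as TensorWrapper above are passed
+        # through untouched, which is why its validator moves the tensor
+        # explicitly. Minimal sketch of the expectation (using the DDP-wrapped
+        # model constructed below; not executed here):
+        #   out = model((torch.randn(10, 10), torch.randn(10, 10)), tuple)
+        #   # both tensors arrive in forward() already on this rank's GPU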
+ + def tuple_and_list_validator(x): + self.assertTrue(len(x), expected_len) + self.assertEqual(1, len({t.device for t in x})) + self.assertEqual(x[0].device.index, self.rank) + return x[0] + x[1] + + def namedtuple_validator(x): + self.assertEqual(x._fields, EXPECTED_FIELDS) + self.assertEqual(x.a.device.index, x.b.device.index) + self.assertEqual(x.a.device.index, self.rank) + return x.a + x.b + + def custom_type_validator(x): + self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu")) + x.t = x.t.to(self.rank) + x.moved_to_gpu = True + return x.t + + def dict_validator(x): + self.assertTrue(EXPECTED_FIELDS[0] in x.keys()) + self.assertTrue(EXPECTED_FIELDS[1] in x.keys()) + self.assertEqual(1, len({t.device for t in x.values()})) + self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank) + return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]] + + validators = { + TensorWrapper: custom_type_validator, + tuple: tuple_and_list_validator, + list: tuple_and_list_validator, + TestNamedTupleInput_0: namedtuple_validator, + TestNamedTupleInput_1: namedtuple_validator, + dict: dict_validator, + } + + class ToyModel(torch.nn.Module): + def __init__(self_): # noqa: B902 + super().__init__() + self_.lin = nn.Linear(10, 10, bias=False) + + def forward(self_, x, expected_type): # noqa: B902 + # Similar to scatter, the recursive to in the single-device + # case does not move tensors if they are in a custom type. + self.assertTrue(isinstance(x, expected_type)) + fwd_tensor = validators[expected_type](x) + return self_.lin(fwd_tensor) + + model = torch.nn.parallel.DistributedDataParallel( + ToyModel().to(self.rank), device_ids=[self.rank] + ) + + def train_iter(inp, input_type): + for _ in range(4): + out = model(inp, input_type) + out.sum().backward() + + # CPU tuple input, should be moved to the proper device before call + # to forward. + inp = tuple(torch.randn(10, 10) for _ in range(expected_len)) + train_iter(inp, tuple) + + # List CPU input, should be moved to proper device before call to + # forward. + inp = [torch.randn(10, 10) for _ in range(expected_len)] + train_iter(inp, list) + # Custom type containing tensor. The type is maintained, but the + # device is not propagated (which is what happens with scatter too) + inp = TensorWrapper(torch.randn(10, 10)) + train_iter(inp, TensorWrapper) + # NamedTuple input. The type should be maintained and tensor inputs + # should be moved to the correct device as in scatter. + batch = 5 + dim = 10 + a = torch.rand(batch, dim) + b = torch.rand(batch, dim) + + inp = TestNamedTupleInput_0(a, b) + train_iter(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + train_iter(inp, type(inp)) + + # dictionary input. + inp = { + EXPECTED_FIELDS[0]: a, + EXPECTED_FIELDS[1]: b, + } + train_iter(inp, type(inp)) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_namedtuple(self): + batch = 5 + dim = 10 + + a = torch.rand(batch, dim, device=self.rank) + b = torch.rand(batch, dim, device=self.rank) + + class NamedTupleModule(torch.nn.Module): + def __init__(self_): # noqa: B902 + super().__init__() + self_.lin = nn.Linear(10, 1) + + def forward(self_, input, expected_type): # noqa: B902 + # Without NamedTuple support, this would be of type tuple. 
+ self.assertTrue( + isinstance(input, expected_type), + f"Expected type {expected_type} but got {type(input)}", + ) + self.assertEqual(input._fields, EXPECTED_FIELDS) + self.assertEqual(a, input.a) + self.assertEqual(b, input.b) + return self_.lin(torch.mul(input.a, input.b)) + + model = torch.nn.parallel.DistributedDataParallel( + NamedTupleModule().cuda(self.rank), device_ids=[self.rank] + ) + inp = TestNamedTupleInput_0(a, b) + # The following would fail if DDP does not propagate NamedTuples correctly. + model(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + model(inp, type(inp)) + + @require_backend_is_available({"gloo"}) + def test_grads_same_across_ranks_with_no_sync(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + if world_size < 2: + self.skipTest("This test requires at least two ranks.") + + class SimpleConditionalModel(nn.Module): + # if rank is 0, uses nn1 on the first pass and nn2 on the second pass. + # else, uses nn3 on the first pass and nn4 on the second pass. + + def __init__(self, rank): + super().__init__() + + self.rank = rank + self.nn1 = nn.Linear(1, 1) + self.nn2 = nn.Linear(1, 1) + self.nn3 = nn.Linear(1, 1) + self.nn4 = nn.Linear(1, 1) + self.state = 0 + + def forward(self, input): + if self.state == 0: + self.state = 1 + if self.rank == 0: + return self.nn1(input) + else: + return self.nn3(input) + else: + self.state = 0 + if self.rank == 0: + return self.nn2(input) + else: + return self.nn4(input) + + model = torch.nn.parallel.DistributedDataParallel( + SimpleConditionalModel(rank), find_unused_parameters=True + ) + mse_loss = nn.MSELoss() + grad_accumulation = 2 + + for microbatch_idx in range(grad_accumulation): + if microbatch_idx < grad_accumulation - 1: + context = model.no_sync + else: + context = nullcontext + + with context(): + input = torch.rand((1, )) + output = model.forward(input) + target = torch.rand((1, )) + + loss = mse_loss(output, target) + loss.backward() + + self.assertTrue( + not any(p.grad is None for p in model.parameters()), + "Gradients can't be None for any model parameter." + ) + grads = torch.cat([p.grad.view(-1) for p in model.parameters()]) + + # Gather all gradients to rank 0. + if rank == 0: + gathered_grads = [torch.zeros_like(grads) for _ in range(world_size)] + else: + gathered_grads = [] + + dist.gather(grads, gather_list=gathered_grads, dst=0) + if rank == 0: + for g in gathered_grads[1:]: + self.assertTrue( + torch.allclose(gathered_grads[0], g), + "Gradients are not the same for all ranks." + ) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_same_across_ranks(self): + # Control flow that is the same across ranks. + batch = 20 + dim = 10 + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used. 
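+            # The map fetched below has one entry per parameter holding how
+            # many ranks produced a gradient for it in this backward (the
+            # per-rank usage bitmaps are summed across ranks when
+            # find_unused_parameters=True), hence the expected values of
+            # world_size vs. 0 asserted underneath.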
+ local_used_map = model.reducer._get_local_used_map() + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, world_size], device=self.rank, dtype=torch.int32 + ) + + # Validate parameter usage. + variable_usage_tensor = local_used_map + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + # 2nd linear layer is unused + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_invalid_static_graph(self): + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + static_graph=True, + ) + random_input = torch.randn(20, 10, device=self.rank) + ones_input = torch.ones(20, 10, device=self.rank) + # unused parameter in the first iteration got used + # in second iteration. + expected_err = "Your training graph has changed in this iteration" + with self.assertRaisesRegex(RuntimeError, expected_err): + for i in range(2): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, expected_err) + + # used parameter in the first iteration got unused + # in second iteration. + with self.assertRaisesRegex( + RuntimeError, + "Expected to have finished reduction in the prior iteration " + "before starting a new one. This error indicates that your " + "training graph has changed in this iteration, " + "e.g., one parameter is used in first iteration, " + "but then got unused in the second iteration. 
" + "this is not compatible with static_graph set to True.\n" + "Parameter indices which did not receive grad for", + ): + for i in range(2): + if i % 2 != 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, "Expected to have finished reduction") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_different_across_ranks(self): + # Control flow that is different across ranks. + batch = 20 + dim = 10 + + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + # Control-flow that is rank and input dependent for the + # model. + use_second_layer = ( + torch.equal(x, torch.ones(batch, dim, device=x.device)) + and self.rank == 1 + ) + + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used only on rank 1. + local_used_map = model.reducer._get_local_used_map() + + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, 1], device=self.rank, dtype=torch.int32 + ) + + variable_usage_tensor = local_used_map + # Validate parameter usage. On odd iterations, 2nd param is only + # used on rank 1. + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. 
+ if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available({"gloo"}) + def test_scatter_object_list(self): + src_rank = 0 + scatter_list = ( + COLLECTIVES_OBJECT_TEST_LIST + if self.rank == src_rank + else [None for _ in COLLECTIVES_OBJECT_TEST_LIST] + ) + world_size = dist.get_world_size() + scatter_list = scatter_list[:world_size] + i = 0 + while len(scatter_list) < world_size: + scatter_list.append(scatter_list[i]) + i += 1 + + output_obj_list = [None] + dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank) + self.assertEqual( + output_obj_list[0], + COLLECTIVES_OBJECT_TEST_LIST[ + self.rank % len(COLLECTIVES_OBJECT_TEST_LIST) + ], + ) + # Ensure errors are raised upon incorrect arguments. + with self.assertRaisesRegex( + ValueError, + "Expected argument scatter_object_output_list to be a list of size at least 1.", + ): + dist.scatter_object_list([], scatter_list, src=src_rank) + + def _generate_sparse_tensors_for_bucket_assignment_test(self): + tensors = [ + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + ] + + tensors_sparse = [t.to_sparse() for t in tensors] + return tensors_sparse + + def _test_compute_bucket_assignment_by_size(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + + # Create a valid model. The constructor initializes the logger that we use later. + # We never actually use the rest of the model - we only need its logger. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # if we don't pass a logger then we can only check that an exception was thrown. + expected_err = "No support for sparse tensors." + with self.assertRaisesRegex(RuntimeError, expected_err): + tensors_sparse = ( + self._generate_sparse_tensors_for_bucket_assignment_test() + ) + if use_logger: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400], logger=net.logger + ) + else: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400] + ) + if use_logger: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. 
+ dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=False) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=True) + + def _determine_expected_error_verify_model_across_rank( + self, group_to_use, diff_num_params=False + ): + # When running with NCCL backend, we don't expect an error on rank 0, + # rather, it will be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING. When + # running with Gloo or with debug mode wrapper, we expect the error + # to be caught inline. + # All ranks report same error when there is a # of parameter + # mismatch since we use allgather in the impl. + if diff_num_params: + expected_err = "DDP expects same model across all ranks" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + is_detail_dbg_mode = dist.get_debug_level() == dist.DebugLevel.DETAIL + if self.rank == 0: + if ( + dist.get_backend(group_to_use) == dist.Backend.NCCL + and not is_detail_dbg_mode + ): + expected_err = "caught collective operation timeout" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + else: + expected_err = None + ctx = self.assertRaises(RuntimeError) + else: + expected_err = "appears not to match" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + def _test_verify_model_across_rank(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + + # Create a valid model. The constructor initializes the logger that we use later. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # Modify the model so that the number of parameters are different for each rank. + # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes, + # so we can check if the correct error is thrown and is logged. + # We can't do this in the constructor above otherwise the logger will + # not be properly initialized. + net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1) + + # if we pass a logger we can verify that it was logged + with ctx: + if use_logger: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()), net.logger + ) + else: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()) + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(group_to_use) + + # We don't check when self.rank != 0 because the logger doesn't log + # the error "Caught collective operation" as that is not thrown in the reducer. 
+ if use_logger and self.rank != 0: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_with_logger(self): + self._test_verify_model_across_rank(use_logger=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_without_logger(self): + self._test_verify_model_across_rank(use_logger=False) + + def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo): + with ctx: + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), device_ids=[self.rank], process_group=ddp_group + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(ddp_group) + + # can't use verify_ddp_error_logged here because net was never properly constructed + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_shape_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + # Creates network with different sized embedding table on different + # ranks. This should throw an error during DDP init. + net = EmbeddingNetDifferentParams(self.rank) + self._run_test_ddp_model_with_diff_params( + ctx, net, group_to_use, group_gloo + ) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_num_params_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use, diff_num_params=True + ) + + # Creates network with diff # of param across ranks, reducer should + # recognize this and throw appropriate error. 
+ net = EmbeddingNetDifferentParams( + self.rank, diff_num_params=(self.rank == 1) + ) + + self._run_test_ddp_model_with_diff_params( + ctx, + net, + group_to_use, + group_gloo, + ) + + def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view): + model = module_cls() + local_net = copy.deepcopy(model) + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + + # Tests that certain parameters not getting gradient since the + # output is unused in loss computation is supported. Specifically, + # checks that the grads remain unchanged and are the same as local + # training. + inp = torch.randn(10, 10) + + # Ensure that if a param is not used in loss computation, its + # gradient is untouched, i.e. if it is None before it is None after, + # not zero. + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + + loss_dist = b_dist.sum() + loss_dist.backward() + + # Ensure that gradient corresponding to parameter "a" was not + # touched, i.e. it is None and matches the local grad. + if module_cls == DictOutputModule: + self.assertTrue(net.module.module.a.weight.grad is None) + self.assertEqual( + net.module.module.a.weight.grad, local_net.module.a.weight.grad + ) + else: + self.assertTrue(net.module.a.weight.grad is None) + self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad) + + saved_a_local_grad = None + saved_a_dist_grad = None + net.zero_grad() + local_net.zero_grad() + for i in range(6): + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + if i < 2: + # Use both params in loss computation. Later, "a" will go + # unused and we check to ensure DDP supports this and + # gradients remain the same as local training. + t = a @ b + t_dist = a_dist @ b_dist + loss = t.sum() + loss_dist = t_dist.sum() + else: + # Model output "a" unused in loss. + loss = b.sum() + loss_dist = b_dist.sum() + loss.backward() + loss_dist.backward() + if i == 1: + # Save grads to compare with them in next iterations. 
+ if module_cls == DictOutputModule: + saved_a_local_grad = local_net.module.a.weight.grad + saved_a_dist_grad = net.module.module.a.weight.grad + else: + saved_a_local_grad = local_net.a.weight.grad + saved_a_dist_grad = net.module.a.weight.grad + self.assertEqual(saved_a_local_grad, saved_a_dist_grad) + elif i >= 2: + # parameter "a" of both models should be the same and not change + if module_cls == DictOutputModule: + self.assertEqual( + net.module.module.a.weight.grad, saved_a_dist_grad + ) + self.assertEqual( + local_net.module.a.weight.grad, saved_a_local_grad + ) + else: + self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad) + self.assertEqual(local_net.a.weight.grad, saved_a_local_grad) + + # Verify grads are the same + for (local_param, dist_param) in zip( + local_net.parameters(), net.parameters() + ): + local_grad = local_param.grad + dist_grad = dist_param.grad + self.assertEqual(local_grad, dist_grad) + + dist.barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_tuple_module(self): + module_cls = UnusedParamTwoLinLayerNet + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_dict_module(self): + module_cls = DictOutputModule + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_undefined_grad_parity_unused_parameters(self): + # TODO: enable this for general training use cases: + # https://github.com/pytorch/pytorch/issues/58511. + x = torch.ones(1, 2).to(self.rank) + net = Net().to(self.rank) + local_net = copy.deepcopy(net) + net = torch.nn.parallel.DistributedDataParallel( + net, + device_ids=[self.rank], + find_unused_parameters=True, + ) + out = net(x).sum() + local_out = local_net(x).sum() + # Simulates undefined gradients. 
+ torch._C._functions.UndefinedGrad()(out).backward() + torch._C._functions.UndefinedGrad()(local_out).backward() + for (dist_param_name, dist_param), (local_param_name, local_param) in zip( + net.named_parameters(), local_net.named_parameters() + ): + dist_grad = dist_param.grad + local_grad = local_param.grad + self.assertEqual( + dist_grad, + local_grad, + f"""DDP param {dist_param_name} with grad {dist_grad} + does not match local param {local_param_name} with grad + {local_grad}""", + ) + + def _test_different_graph_across_ranks( + self, find_unused_parameters=False, static_graph=False + ): + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + if self.rank == 0: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + torch.manual_seed(31415) + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = ToyModel(self.rank).cuda(self.rank) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused_parameters, + gradient_as_bucket_view=True, + static_graph=static_graph, + ) + random_input = torch.randn(20, 10, device=self.rank) + for i in range(10): + out = ddp_model(random_input) + loss = out.sum() + loss.backward() + return ddp_model + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_different_graph_across_ranks(self): + base_model = self._test_different_graph_across_ranks( + find_unused_parameters=True + ) + self.assertFalse( + base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + static_model = self._test_different_graph_across_ranks(static_graph=True) + self.assertTrue( + static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + for i, j in zip(base_model.parameters(), static_model.parameters()): + self.assertEqual(i, j) + + @require_backend_is_available({"gloo"}) + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_gloo(self): + tensors = [torch.ones(10) * self.rank] + # Kick off some allreduce work on all ranks + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + # Run monitored barrier and ensure it passes + timeout = timedelta(seconds=2) + dist.monitored_barrier(timeout=timeout) + # Check monitored_barrier success with wait_all_ranks=True + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + # All ranks besides 1 call into barrier, rank 0 should report failure + # while others report gloo error. + failed_rank = 1 + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank != failed_rank: + # Other ranks should not pass barrier since rank 0 failed. + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + # We need a barrier since otherwise failed_rank exits too early + # and cause a timeout. 
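+ # (failed_rank never entered the monitored barrier above, so sync all ranks
+ # here before tearing down.)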
+ self._barrier(timeout=30) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_subgroup(self): + # Tests that monitored_barrier works as expected on non-default + # process groups. + failed_rank = 1 + timeout = 0.1 + subgroup = dist.new_group(ranks=[0, 1]) + + if self.rank == failed_rank: + return + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(subgroup, timeout) + else: + # Other ranks call into monitored_barrier, but this should be a + # noop because they are not part of the subgroup. Verify that + # there are no errors here. + dist.monitored_barrier(subgroup, timeout) + + def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks): + # tests expected behavior when nonzero rank hangs. + nccl_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + # provide sufficient timeout so communicators + # can be initialized in ctor. + timeout=timedelta(seconds=15), + backend=dist.Backend.NCCL, + ) + gloo_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + backend=dist.Backend.GLOO, + ) + tensors = [torch.ones(10, device=self.rank) * self.rank] + # Let all ranks call allreduce first to set up communicators etc. + # Directly simulating error here will run into store issue described + # in https://github.com/pytorch/pytorch/issues/54524. + nccl_pg.allreduce(tensors).wait(timedelta(seconds=5)) + # All ranks besides 0 call into allreduce. This is to simulate a + # desync across the world, where some ranks call into + # monitored_barrier() and others are stuck in collective comm. In + # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this + # test to ensure it exits cleanly. + if self.rank != 0: + # Can get different errors here depending on whether gloo-based + # wrapper PG is enabled or not, since with wrapper pg, it will + # fail in a collective synchronization check and not actually + # call into the nccl pg. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + err_regex = "Timed out waiting" + else: + err_regex = "caught collective operation timeout" + with self.assertRaisesRegex(RuntimeError, err_regex): + nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1)) + else: + # Rank 0 should report first (in order) timed out rank or all ranks + # depending on wait_all_ranks flag passed into monitored_barrier. + if wait_all_ranks: + rank_str = ", ".join( + [str(i) for i in range(1, int(self.world_size))] + ) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + else: + expected_first_fail_rank = 1 + err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier" + monitored_barrier_timeout_seconds = timedelta(seconds=0.1) + with self.assertRaisesRegex(RuntimeError, err_regex): + gloo_pg.monitored_barrier( + monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks + ) + + self._barrier(timeout=30) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang(self): + # tests expected behavior when nonzero rank hangs and we want to + # report first timed out rank. 
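+ # With wait_all_ranks=False, rank 0 should only report the first rank that
+ # failed to reach the barrier.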
+ self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang_wait_all_ranks(self): + # Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out + os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0" + # tests expected behavior when nonzero rank hangs and we want to + # report all timed out ranks. + self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_rank_0_timeout(self): + # tests error when rank 0 exhausts its given timeout. + process_group = dist.new_group(ranks=list(range(int(self.world_size)))) + timeout = timedelta(seconds=0) + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier" + ): + process_group.monitored_barrier(timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_failure_order(self): + # Ensure that the first (in sorted order) rank is reported when + # multiple ranks fail to pass the monitored_barrier. + # TODO(#54879): Provide ability to wait and report all failed ranks + expected_first_failed_rank = 2 + timeout = timedelta(seconds=2) + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {expected_first_failed_rank}" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank == 1: + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + def test_monitored_barrier_wait_all_ranks(self): + # Tests simple case where > 1 rank does not call into monitored + # barrier and verifies all ranks are reported by rank 0. + if self.rank == 0: + timeout = timedelta(seconds=0.1) + rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))]) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping(self): + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "a.weight", 1: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test when DDP is used with ignored parameters. 
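+ # Ignored parameters are not handed to the reducer, so only "b.weight"
+ # should remain in the mapping.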
+ model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test errors are raised when DDP and module parameters mismatch. + # This generally indicates a bug with DDP and is not expected to + # happen in user applications. + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + net_params, _ = net._build_params_for_reducer() + if self.rank == 0: + print(type(net_params[0])) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + with self.assertRaisesRegex(ValueError, "Expected param to name mapping"): + net._build_debug_param_to_name_mapping(net_params) + + net_params = net_params[:-3] + with self.assertRaisesRegex(ValueError, "Param with name"): + net._build_debug_param_to_name_mapping(net_params) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping_requires_grad(self): + class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(10, 10) + # Is not tracked by DDP and should not show up in param to + # name mapping. + self.lin.bias.requires_grad_(False) + + def forward(self, x): + return self.lin(x) + + model = Net() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + expected_mapping = { + 0: "lin.weight", + } + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertEqual(param_to_name_mapping, expected_mapping) + + def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse): + debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF + + class SubModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.embedding_net = EmbeddingNetDifferentParams(0) + self.lin = TwoLinLayerNet() + self.bn = BatchNormNet() + self.lin_layer = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.bn(x) + x = self.lin_layer(x) + x = self.lin.a(x) # self.lin.b param unused + # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and + # self.embedding_net.lin unused. 
+ return x + + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.sub_module = SubModule() + + def forward(self, x): + return self.sub_module(x) + + model = MyModel() + sparse_embedding_fqns = [] + if ignore_sparse: + for module_name, module in model.named_modules(): + if module == model.sub_module.embedding_net.embedding: + for parameter_name, param in module.named_parameters( + recurse=False + ): + fqn = f"{module_name}.{parameter_name}" + sparse_embedding_fqns.append(fqn) + + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, sparse_embedding_fqns + ) + unused_modules = [ + model.sub_module.embedding_net.lin, + model.sub_module.lin.b, + ] + else: + unused_modules = list(model.sub_module.embedding_net.modules()) + [ + model.sub_module.lin.b, + ] + + expected_unused_param_fqns = [] + used_param_fqns = [] # Validate that these don't mistakenly show up. + fqn_to_param_index = {} + index = 0 + for module_name, module in model.named_modules(): + for parameter_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{parameter_name}" + fqn_to_param_index[fqn] = index + if fqn not in sparse_embedding_fqns: + index += 1 + if module in unused_modules: + expected_unused_param_fqns.append(fqn) + else: + if ( + not ignore_sparse + or module != model.sub_module.embedding_net.embedding + ): + used_param_fqns.append(fqn) + + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + batch, dim = 10, 2 + inp = torch.ones(batch, dim) + for i in range(2): + if i == 0: + out = net(inp) + loss = out.sum() + loss.backward() + else: + try: + out = net(inp) + loss = out.sum() + loss.backward() + except RuntimeError as e: + e = str(e) + + unused_param_substr = e[e.find("did not receive grad") :] + # Validate that each unused param fully qualified name + # shows up in error logs. We do this instead of + # constructing a joined string since order of parameters + # can be different in Reducer. In addition, validate + # param indices show up as well. + for unused_param_fqn in expected_unused_param_fqns: + self.assertTrue( + unused_param_fqn in unused_param_substr + or debug_mode_off + ) + self.assertTrue( + str(fqn_to_param_index[unused_param_fqn]) + in unused_param_substr, + f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}", + ) + + # Validate that used param fqns don't show up in error + # logs. + for used_param_fqn in used_param_fqns: + self.assertFalse(used_param_fqn in unused_param_substr) + # Validate that ignored param fqns don't show up as unused + # (since DDP does not track them) + for sparse_param_fqn in sparse_embedding_fqns: + self.assertFalse(sparse_param_fqn in unused_param_substr) + else: + self.assertTrue(False, "Expected error was not raised!") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_error(self): + self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_err_ignore_params(self): + # Tests unused parameter reporting when DDP is configured to ignore + # certain parameters. 
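+ # The ignored sparse embedding params must not be reported as unused either,
+ # since DDP does not track them.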
+ self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_ddp_inference(self): + # tests that DDP module can be run on a single node with no_grad + # or eval setting and there is no hang. + rank = self.rank + torch.cuda.set_device(rank) + model = Net().cuda() + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + syncbn_model = nn.SyncBatchNorm( + 2, momentum=0.99, track_running_stats=False + ).cuda() + local_syncbn_model = copy.deepcopy(syncbn_model) + syncbn_model = torch.nn.parallel.DistributedDataParallel( + syncbn_model, device_ids=[rank] + ) + inp = torch.randn(10, 2, device=rank) + inp_syncbn = torch.randn(10, 2, 4, 4, device=rank) + tests = [ + (model, local_model, inp), + (syncbn_model, local_syncbn_model, inp_syncbn), + ] + for test in tests: + test_model, test_local_model, test_inp = test + if self.rank == 0: + test_model.eval() + test_local_model.eval() + for _ in range(6): + self.assertEqual( + test_model(test_inp), test_local_model(test_inp) + ) + + # Barrier since only rank 0 runs inference. Test should be + # much faster than 30s, but this is to avoid flakiness. + self._barrier(timeout=30) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_ddp_sync_bn_training_vs_eval(self): + rank = self.rank + torch.cuda.set_device(rank) + # Need to set track_running_stats=False, when track_running_stats=True, + # bn_training is False and sync could not occur in eval model. + model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda( + rank + ) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank]) + # Test sync occurs in training mode. + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model(inp) + loss = out.sum() + loss.backward() + + # SyncBN allgathers stats across all ranks, so verify call to + # all_gather in profiler. + if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertNotEqual([], all_gather_calls) + + # Only do inference on one rank. If SyncBN did collective stats sync, + # this would hang/error. + model_inference = model.module + if self.rank == 0: + model_inference.eval() + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model_inference(inp) + loss = out.sum() + loss.backward() + + # Ensure sync does not occur in eval() mode. 
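+ # Unlike the training-mode check above, the profile should contain no
+ # all_gather calls here.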
+ if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertEqual([], all_gather_calls) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_python_error_logged(self): + # Most python exceptions in DDP are raised during init before + # reducer is constructed, so we don't have a logger in those cases. + # However, the below is one example where a python error is thrown + # after reducer is constructed. + model = TwoLinLayerNet().cuda(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + expected_err = "must be callable" + with self.assertRaisesRegex(TypeError, expected_err): + model.register_comm_hook({}, {}) + + verify_ddp_error_logged(model, expected_err) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_static_graph_nested_types(self): + # Tests for static graph training when outputs are not just tensors + # but can be (nested) tuple, list, dict, etc. + rank = self.rank + torch.cuda.set_device(rank) + + class NestedOutputModule(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(100, 1, bias=False) + + def forward(self, inp, output_type): + if output_type == "tuple": + return ( + self.lin(inp), + ( + self.lin(inp), + self.lin(inp), + ), + ) + elif output_type == "list": + return [ + self.lin(inp), + [ + self.lin(inp), + self.lin(inp), + ], + ] + elif output_type == "dict": + return { + "a": self.lin(inp), + "b": { + "c": self.lin(inp), + }, + } + + def get_loss(model_output): + loss = 0.0 + if isinstance(model_output, torch.Tensor): + return model_output.sum() + elif isinstance(model_output, dict): + for value in model_output.values(): + loss += get_loss(value) + elif isinstance(model_output, (tuple, list)): + for x in model_output: + loss += get_loss(x) + else: + raise ValueError(f"Unknown model output type {type(model_output)}") + return loss + + model = NestedOutputModule().cuda(rank) + model_static_graph = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + model_static_graph = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + static_graph=True, + ) + inp = torch.randn(10, 100) + type_mapping = { + "list": list, + "tuple": tuple, + "dict": dict, + } + for output_type in type_mapping.keys(): + for i in range(6): + out = model(inp, output_type=output_type) + loss = get_loss(out) + loss.backward() + self._model_step(model) + out_static = model_static_graph(inp, output_type=output_type) + self.assertTrue(isinstance(out_static, type_mapping[output_type])) + loss_static = get_loss(out_static) + loss_static.backward() + self._model_step(model_static_graph) + for (p, p_static) in zip( + model.parameters(), model_static_graph.parameters() + ): + self.assertEqual(p, p_static) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_returns_tensor_with_no_grad(self): + # Tests case where module returns tensor that does not require grad. 
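+ # The detached output must come back from DDP with requires_grad=False,
+ # while backward still flows through the cloned output.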
+ torch.cuda.set_device(self.rank) + + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc2(F.relu(self.fc1(x))) + y = x.clone() + x = x.detach() + assert not x.requires_grad + return (x, y) + + model = MyModel().to(self.rank) + inp = torch.randn(1, 10, device=self.rank) + for (find_unused, static_graph) in itertools.product( + [True, False], [True, False] + ): + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + for i in range(6): + out = ddp(inp) + self.assertFalse(out[0].requires_grad) + o = (out[0] + out[1]).sum() + o.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_detect_ddp_is_actually_static(self): + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10) + + def forward(self, x, find_unused, dynamic): + if find_unused: + if dynamic: + return self.net2(self.net1(x)) + else: + return self.net2(x) + else: + return self.net2(self.net1(x)) + + # Set of unused parameters don't change across iterations + torch.cuda.set_device(self.rank) + model = ToyModel().cuda() + for find_unused in [True, False]: + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused, + ) + inp = torch.randn(1, 10, device="cuda") + for _ in range(6): + out = ddp(inp, find_unused=find_unused, dynamic=False) + loss = out.sum() + loss.backward() + self.assertTrue(ddp.reducer._ddp_graph_static()) + + # Set of unused parameters dynamically change + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + inp = torch.randn(1, 10, device="cuda") + for i in range(6): + out = ddp(inp, find_unused=True, dynamic=i % 2 == 0) + loss = out.sum() + loss.backward() + self.assertFalse(ddp.reducer._ddp_graph_static()) + + def _test_ddp_new_tensor_in_fwd(self, static_graph): + # Test from https://github.com/pytorch/pytorch/issues/60733 + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + self.device = self.fc1.weight.device + + def __init_opt(self): + opt = torch.randn(1, 10, device=self.device) + return opt + + def forward(self, x, opt_1, opt_2, opt_nested): + x = F.relu(self.fc1(x)) + x = self.fc2(x) + if opt_1 is None: + opt_1 = self.__init_opt() + if opt_2 is None: + opt_2 = self.__init_opt() + if opt_nested is None or not torch.is_tensor(opt_nested): + opt_nested = self.__init_opt() + # Test multiple tensors as well as newly created tensors + # within a struct. 
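+ # (opt_nested is wrapped in a dict so DDP also has to handle a freshly
+ # created tensor nested inside a container.)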
+ return x, opt_1, opt_2, {"tensor": opt_nested} + + model = MyModel().to(self.rank) + for find_unused in [True, False]: + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + broadcast_buffers=False, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + + opt = [None for _ in range(3)] + for i in range(2): + ddp.zero_grad() + x = torch.randn(1, 10, device=self.rank) + out, opt[0], opt[1], opt[2] = ddp( + x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2] + ) + for i in range(len(opt)): + if torch.is_tensor(opt[i]): + self.assertEqual(opt[i].grad_fn, None) + else: + self.assertEqual(opt[i]["tensor"].grad_fn, None) + out.mean().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd_static_graph(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=True) + + def _test_ddp_buffer_hook_allreduce(self, return_futures): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + buffers = [buffer for (_, buffer) in named_buffers.items()] + futs = [ + dist.all_reduce( + buffer, group=ddp.process_group, async_op=True + ).get_future() + for buffer in buffers + ] + if return_futures: + return futs + else: + torch.futures.collect_all(futs).wait() + + hook_pre_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD + ) + hook_post_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD + ) + for hook_run_location in [ + hook_pre_fwd, + hook_post_fwd, + ]: + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook( + model_ddp, buffer_comm_hook, hook_run_location + ) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + broadcast_buffers=False, + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + # Since buffer reduction is done pre-forward, simulate it for + # no hook case here. + # Simulate allreduce appropriately depending on hook location. + if hook_run_location == hook_pre_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + + loss_no_hook = model_ddp_no_hook(inp).sum() + if hook_run_location == hook_post_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + torch.cuda.synchronize() + + # if return_futures, they are only awaited on by DDP + # at the end of the backwards pass for maximum overlap. + if not return_futures: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + # Note that when custom hooks return futures, this + # comparison is not expected to work when hook run location + # is pre-forward pass. 
This is because the hook does async + # communication and forward pass modifies the buffer without + # appropriate synchronization. Therefore, if returning + # futures from custom buffer hooks, it is advised to set + # hook run location to post forward. + if return_futures and hook_run_location == hook_post_fwd: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce_return_future(self): + self._test_ddp_buffer_hook_allreduce(return_futures=True) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce(self): + self._test_ddp_buffer_hook_allreduce(return_futures=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer_via_hook(self): + # test that _distributed_broadcast_coalesced via registered hook is + # equivalent to DDP's default broadcast coalesced. + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + # named_buffers is a Dict[str, Tensor] representing a mapping + # from buffer name to buffer. + buffers = [buffer for (_, buffer) in named_buffers.items()] + ddp._default_broadcast_coalesced(buffers) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + loss_no_hook = model_ddp_no_hook(inp).sum() + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_remove_autograd_hooks(self): + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(nn.Module): + def __init__(self, device): + super().__init__() + self.error = True + self.fc1 = nn.Linear(10, 10).cuda(device) + + def forward(self, inp): + if self.error: + return self.fc1(SimulateError.apply(inp)) + else: + return self.fc1(inp) + + + # Run with error to trigger backward pass that marks fc1 as being marked + # ready. If we don't remove autograd hooks before running below it would + # fail on the old autograd hook. + model = MyModel(self.rank) + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + model_ddp1 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + + with self.assertRaises(RuntimeError): + model_ddp1(input).sum().backward() + + # Remove autograd hooks on old instance. + model_ddp1._remove_autograd_hooks() + + # Try another DDP instance without error now. 
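+ # With the stale hooks removed, wrapping the same module in a new DDP
+ # instance should train without error.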
+ model.error = False + model_ddp2 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp2(input).sum().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @unittest.skip("Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751") + def test_ddp_has_finalized(self): + + @dataclass + class MyClass: + obj: torch.Tensor + + class MyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.rank = rank + self.fc1 = nn.Linear(1024, 1024).cuda(rank) + self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank) + + def forward(self, inp): + if self.rank == 0: + return self.fc1(inp), MyClass(self.fc2(inp)) + else: + return self.fc1(inp), self.fc2(inp) + + model = MyModel(self.rank) + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=(1024 * 4 / 1024 / 1024), # One bucket per parameter. + ) + + if self.rank == 0: + out1, _ = ddp(input) + out1.sum().backward() + else: + out1, out2 = ddp(input) + (out1.sum() + out2.sum()).backward() + + if self.rank == 0: + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp._check_reducer_finalized() + + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp(input) + else: + ddp._check_reducer_finalized() + ddp(input) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "TORCH_NCCL_USE_COMM_NONBLOCKING only applies to NCCL" + ) + def test_nccl_init_abort(self): + """ + Tests that we can abort a NCCL communicator during initialization and + recover appropriately. + """ + # Reinitialize global process group with TORCH_NCCL_USE_COMM_NONBLOCKING=1 + os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1" + dist.destroy_process_group() + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + + # Abort pg in background thread. + running = True + + def abort(device): + pg = _get_default_group() + while running: + pg._get_backend(torch.device(device))._shutdown() + time.sleep(1) + + if self.rank != 1: + import threading + t = threading.Thread(target=abort, args=(self.rank,)) + t.start() + with self.assertRaises(RuntimeError): + # First collective triggers initialization via ncclCommInitRank. + torch.distributed.barrier() + running = False + t.join() + + def _run_ddp_update_process_group(self, new_pg): + def get_num_torch_recompiles(): + guard_failures = torch._dynamo.utils.guard_failures + num_recompiles = [len(guard_failures[code]) for code in guard_failures] + return 0 if len(num_recompiles) == 0 else max(num_recompiles) + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + # 4MB for multiple buckets. 
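+ # Each 1024x1024 fp32 weight is ~4MB, well above the 1MB bucket cap used
+ # below, so gradients are split across several buckets.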
+ self.fc1 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc2 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc3 = torch.nn.Linear(1024, 1024).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc3(self.fc2(self.fc1(inp))) + + + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + model = torch.compile(ddp) + + def run_iteration(): + # Run regular iteration. + out = model(input, error=False) + out.sum().backward() + torch.cuda.synchronize() + + # Run with error. + with self.assertRaises(RuntimeError): + out = model(input, error=True) + out.sum().backward() + torch.cuda.synchronize() + + run_iteration() + assert 0 == get_num_torch_recompiles() + + if new_pg: + # Now reduce world_size and run iteration. + group_size_2 = dist.new_group(ranks=[0, 1]) + ddp._update_process_group(group_size_2) + if self.rank in [0, 1]: + run_iteration() + + # Increase the world size and run iteration. + group_size_3 = dist.new_group(ranks=[1, 2, 3]) + ddp._update_process_group(group_size_3) + if self.rank in [1, 2, 3]: + run_iteration() + + # Back to default size. + ddp._update_process_group(_get_default_group()) + run_iteration() + else: + # Create default pg of smaller size. + dist.destroy_process_group() + + if self.rank in [1, 2, 3]: + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=3, + rank=self.rank - 1, + timeout=timedelta(seconds=default_pg_timeout), + ) + ddp._update_process_group(_get_default_group()) + run_iteration() + dist.destroy_process_group() + + # Need a barrier here to ensure ranks 1, 2 and 3 are done. + self._barrier(wait_for=4) + + # Need to init pg again for "_barrier" to succeed. + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=4, + rank=self.rank, + timeout=timedelta(seconds=default_pg_timeout), + ) + + # Validate no more recompiles. 
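+ # Swapping the process group must not have introduced new dynamo guard
+ # failures for the compiled DDP module.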
+ assert 0 == get_num_torch_recompiles() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_new_group(self): + self._run_ddp_update_process_group(new_pg=True) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_default_group(self): + self._run_ddp_update_process_group(new_pg=False) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_grad_undefined(self): + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + self.fc1 = torch.nn.Linear(10, 10).cuda(device) + self.fc2 = torch.nn.Linear(10, 10).cuda(device) + self.fc3 = torch.nn.Linear(10, 10).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc2(self.fc1(inp)) + + + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + + try: + ddp(input, True).sum().backward() + except RuntimeError: + ddp._update_process_group(_get_default_group()) + + # Reset grads. + for param in ddp.parameters(): + param.grad = None + + # Run ddp again. + ddp(input, False).sum().backward() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_no_find_unused(self): + ddp = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(10, 10).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + ddp._update_process_group(_get_default_group()) + + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer(self): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + class NetWithBuffers(nn.Module): + def __init__(self) -> None: + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + return self.b(self.a(x)) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + if rank == 0: + model_ddp.module.buffer = model_ddp.module.buffer + 1 + loss = model_ddp(inp).sum() + loss.backward() + # Ensure all buffers are synchronized. 
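+ # Gather every rank's copy of the buffer and check they all match rank 0's.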
+ bufs = [ + torch.empty_like(model_ddp.module.buffer) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(bufs, model_ddp.module.buffer) + rank_0_buf = bufs[0] + for buf in bufs[1:]: + self.assertEqual(rank_0_buf, buf) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_static_graph_multi_forward(self): + class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(10, 10) + self.relu = nn.ReLU() + + def forward(self, x): + return self.relu(self.lin(x)) + + torch.cuda.set_device(self.rank) + torch.manual_seed(42 << 1337 % (self.rank + 1)) + model = Net().cuda(self.rank) + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], static_graph=True + ) + inp = torch.ones(2, 10, device="cuda") + for _ in range(3): + model.zero_grad() + local_model.zero_grad() + a = model(inp) + b = model(inp) + loss = a.sum() + b.sum() + loss.backward() + # Grads should be equal to a local model that ran through inp twice and averaged grads + if self.rank == 0: + inp_clone = inp.clone() + for _ in range(2): + a = local_model(inp_clone) + b = local_model(inp_clone) + loss = a.sum() + b.sum() + loss.backward() + + ws = dist.get_world_size() + for p in local_model.parameters(): + p.grad.data = p.grad / dist.get_world_size() + + for p_ddp, p_local in zip( + model.parameters(), + local_model.parameters() + ): + self.assertTrue( + torch.allclose( + p_ddp.grad, p_local.grad + ), + f"{p_ddp.grad} vs {p_local.grad}" + ) + + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_sync_bn_logged(self): + model = BN_NET + rank = self.rank + # single gpu training setup + model_gpu = model.cuda(rank) + no_sync_bn = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model_gpu), + device_ids=[self.rank], + ) + ddp_logging_data = no_sync_bn._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", True) + self.assertFalse(sync_bn_logged) + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu) + model_DDP = torch.nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=[self.rank], + ) + ddp_logging_data = model_DDP._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", False) + self.assertTrue(sync_bn_logged) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_stateless_api_with_ddp(self): + class MockModule(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.l1 = torch.nn.Linear(1, 1) + buffer = torch.ones(1) + self.register_buffer("buffer", buffer) + + def forward(self, x): + return self.l1(x) + self.buffer + + device = self.rank + module = MockModule().to(device) + module = torch.nn.parallel.DistributedDataParallel( + module, device_ids=[device] + ) + x = torch.rand((1, 1)).to(device) + weight = torch.tensor([[1.0]], device=device, requires_grad=True) + bias = torch.tensor([0.0], device=device, requires_grad=True) + buffer = torch.tensor([0.0], device=device) + parameters = { + "module.l1.weight": weight, + "module.l1.bias": bias, + "module.buffer": buffer, + } + prev_weight = module.module.l1.weight.clone() + 
prev_buffer = module.module.buffer.clone() + + res = torch.func.functional_call(module, parameters, x) + self.assertEqual(x, res) + # check that the weight remain unmodified + cur_weight = module.module.l1.weight + cur_buffer = module.module.buffer + self.assertEqual(cur_weight, prev_weight) + self.assertEqual(cur_buffer, prev_buffer) + # run a backward pass and check the gradients + res.backward() + self.assertIsNotNone(weight.grad) + self.assertIsNotNone(bias.grad) + # Gradient was not calculated for the module stated and buffers + self.assertIsNone(buffer.grad) + self.assertIsNone(module.module.l1.weight.grad) + self.assertIsNone(module.module.l1.bias.grad) + self.assertIsNone(module.module.buffer.grad) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_forward_backward_hook(self): + class DummyTestModel(nn.Module): + def __init__(self) -> None: + super().__init__() + torch.manual_seed(0) + self.fc = nn.Linear(2, 2) + + def forward(self, x): + return self.fc(x) + + def relu_hook(module, input): + return nn.functional.relu(input[0]) + + def gelu_hook(module, _input, output): + return nn.functional.gelu(output) + + def celu_hook(module, _input, output): + return (nn.functional.celu(output[0]),) + + local_model = DummyTestModel() + ddp_model = DummyTestModel() + local_model.fc.register_forward_pre_hook(relu_hook) + local_model.fc.register_forward_hook(gelu_hook) + ddp_model.fc.register_forward_pre_hook(relu_hook) + ddp_model.fc.register_forward_hook(gelu_hook) + local_model.fc.register_backward_hook(celu_hook) + ddp_model.fc.register_backward_hook(celu_hook) + ddp_model = DistributedDataParallel( + ddp_model.to(self.rank), device_ids=[self.rank] + ) + input_data = torch.rand(5, 2) + output_local = local_model(input_data) + output_ddp = ddp_model(input_data.to(self.rank)) + self.assertEqual(output_local, output_ddp) + output_local.sum().backward() + output_ddp.sum().backward() + ddp_grads = [p.grad for p in ddp_model.parameters()] + self.assertEqual(ddp_grads[0], local_model.fc.weight.grad) + self.assertEqual(ddp_grads[1], local_model.fc.bias.grad) + + def _test_hook_pickling(self, hook, hook_state): + torch.manual_seed(0) + learning_rate = 0.01 + chkpt_file = tempfile.gettempdir() + "/checkpoint.pt" + rank = self.rank + + input = torch.randn(7, 1, device=rank) + target = torch.randn(7, 5, device=rank) + net = torch.nn.Linear(1, 5).to(rank) + ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank]) + dummy_ddp_model = DistributedDataParallel( + copy.deepcopy(net), device_ids=[rank] + ) + optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate) + ddp_model.register_comm_hook(hook_state, hook) + ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + out = ddp_model(input) + loss = F.mse_loss(out, target) + loss.backward() + optimizer.step() + + state = { + "state_dict": ddp_model.state_dict(), + "comm_hook": hook, + "comm_hook_state": hook_state, + } + + if rank == 0: + with self.assertLogs("torch.distributed") as captured: + torch.save(state, chkpt_file) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group is not serializable and excluded from a saved state.", + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % rank} + with self.assertLogs("torch.distributed") as captured: + checkpoint = torch.load(chkpt_file, 
map_location=map_location) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group will be set to a default group (i.e. the world size).\ + If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.", + ) + + dummy_ddp_model.load_state_dict(checkpoint["state_dict"]) + dummy_hook = checkpoint["comm_hook"] + dummy_hook_state = checkpoint["comm_hook_state"] + dummy_optimizer = torch.optim.SGD( + dummy_ddp_model.parameters(), lr=learning_rate + ) + + # Check that loaded function is correct + self.assertEqual(dummy_hook.__qualname__, hook.__qualname__) + + # Check that all slots' keys were restored correctly + self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__) + + # Check that all slots' attributes are restored correctly + # Excluding ``process_group`` and ``rng``. + for entry in dummy_hook_state.__slots__: + if entry != "process_group" and entry != "rng": + self.assertEqual( + getattr(dummy_hook_state, entry), getattr(hook_state, entry) + ) + + # Check that ``process_group`` was set to default + self.assertEqual(dummy_hook_state.process_group, _get_default_group()) + + # Check that a random state was restored properly: + # ``np.random.RandomState.get_state`` returns a tuple with entries: + # ``bit_generator`` - str, + # ``state.key`` - ndarray dtype[uint32], + # ``state.pos`` - int, + # ``has_gauss`` - int, + # ``gauss`` - float + # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi) + # To make sure random state was restored properly, all entries should equal the original + for entry1, entry2 in zip( + hook_state.rng.get_state(), dummy_hook_state.rng.get_state() + ): + np.testing.assert_array_equal(entry1, entry2) + + dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook) + dummy_ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + dummy_optimizer.zero_grad() + out_origin = ddp_model(input) + out_dummy = dummy_ddp_model(input) + loss_origin = F.mse_loss(out_origin, target) + loss_dummy = F.mse_loss(out_dummy, target) + loss_origin.backward() + loss_dummy.backward() + optimizer.step() + dummy_optimizer.step() + + # Check that gradients after 10 epochs are the same + for orig_param, dummy_param in zip( + ddp_model.parameters(), dummy_ddp_model.parameters() + ): + self.assertEqual(orig_param.grad, dummy_param.grad) + + dist.barrier() + if rank == 0: + os.remove(chkpt_file) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + @skip_but_pass_in_sandcastle_if( + True, "Skipped due to flakiness" + ) + def test_ddp_hook_pickling_powerSGD(self): + + hook = powerSGD.powerSGD_hook + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=4, + ) + self._test_hook_pickling(hook, powersgd_state) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device_mesh_initialization(self): + """ + Test DDP with device_mesh initialization. 
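+ DDP should accept a 1D device_mesh in place of process_group and reject
+ passing both, or a mesh with more than one dimension.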
+ """ + world_size = int(os.environ["WORLD_SIZE"]) + + from torch.distributed.device_mesh import init_device_mesh + device_mesh = init_device_mesh("cuda", (world_size,)) + + pg = _get_default_group() + + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_mesh=device_mesh) + self.assertEqual(ddp_model.device_mesh, device_mesh) + + with self.assertRaisesRegex( + RuntimeError, "Cannot specify both process_group and device_mesh arguments." + ): + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, process_group=pg, device_mesh=device_mesh + ) + + with self.assertRaisesRegex( + RuntimeError, "Only 1D device mesh is supported," + ): + device_mesh = init_device_mesh("cuda", (2, world_size // 2)) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_mesh=device_mesh + ) + + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_compile_static_graph(self): + "Tests that DDP works with torch compile when static_graph=True" + model = torch.nn.Linear(10, 10).cuda(self.rank) + model_clone = copy.deepcopy(model) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + ddp_static = torch.nn.parallel.DistributedDataParallel( + model_clone, + device_ids=[self.rank], + static_graph=True + ) + ddp = torch.compile(ddp) + ddp_static = torch.compile(ddp_static) + input = torch.rand(10, 10).cuda(self.rank) + # verify output and gradient parity + for _ in range(6): + out_ddp = ddp(input).sum() + out_ddp_static = ddp_static(input).sum() + self.assertEqual(out_ddp, out_ddp_static) + out_ddp.backward() + out_ddp_static.backward() + for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()): + self.assertEqual(p1.grad, p2.grad) + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sink_noclone(self): + "Tests that we can configure DDP to avoid clone" + + class OpPatcher(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + func_packet = func._overloadpacket + if func_packet == torch.ops.aten.clone: + raise RuntimeError("clone encountered!") + kwargs = kwargs if kwargs else {} + return func(*args, **kwargs) + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc = torch.nn.Linear(10, 10) + + def forward(self, input): + return self.fc(input) + + model = MyModel().cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + ddp._set_ddp_sink_clone(False) + input = torch.rand(10, 10).cuda(self.rank) + + with OpPatcher() as patcher: + ddp(input).sum().backward() + + + +instantiate_parametrized_tests(DistributedTest._DistTestBase) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9230043c18c258142027e3f813d98dfb0d9fb585 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py @@ -0,0 +1,66 @@ 
+# mypy: allow-untyped-defs
+
+from contextlib import contextmanager
+from datetime import timedelta
+from functools import (
+ partial,
+ wraps,
+)
+
+import torch.distributed as dist
+import torch.distributed.distributed_c10d as c10d
+
+class MockProcessGroup(dist.ProcessGroup):
+
+ def __init__(self, rank, world):
+ super().__init__(rank, world)
+
+ def getBackendName(self):
+ return "mock_process_group"
+
+def create_mock_pg(prefix_store, rank, world_size, timeout):
+ return MockProcessGroup(rank, world_size)
+
+dist.Backend.register_backend('mock_process_group', create_mock_pg)
+
+def mock_init_dist(rank, world_size):
+ # !!! WARNING !!!
+ # Kids don't try this at home, this is a cute pile of hacks that
+ # depends on a small mountain of c10d internals
+ assert not dist.is_initialized()
+ store = dist.HashStore()
+ # Trick _store_based_barrier into believing everyone else already checked in
+ # Zero is the group index
+ store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
+ dist.init_process_group(
+ backend="mock_process_group",
+ rank=rank,
+ world_size=world_size,
+ store=store,
+ group_name="fake",
+ timeout=timedelta(seconds=1))
+
+@contextmanager
+def with_dist(rank=0, world_size=2):
+ """
+ Context manager that initializes c10d with a mock process group.
+ """
+ mock_init_dist(rank=rank, world_size=world_size)
+ try:
+ yield
+ finally:
+ dist.destroy_process_group()
+
+def with_fake_comms(func=None, rank=0, world_size=2):
+ """
+ Function wrapper that initializes a mock process group designed for testing.
+ Right now only querying the world size is supported.
+ """
+ if func is None:
+ return partial(with_fake_comms, rank=rank, world_size=world_size)
+
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ with with_dist(rank, world_size):
+ func(self, *args, **kwargs)
+ return wrapper
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff4cbe56abc9edf395f6f2cf8f98c59586cd07c6
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py
@@ -0,0 +1,31 @@
+# mypy: allow-untyped-defs
+
+import torch.distributed as dist
+
+from torch._C._distributed_c10d import (
+ FakeProcessGroup,
+)
+
+
+class FakeStore(dist.Store):
+ """
+ A fake store is a fake key-value store used simply to initialize the
+ fake process group; one can use either FakeStore or HashStore.
+ """
+
+
+def _create_fake_pg(prefix_store, rank, world_size, timeout):
+ """
+ A fake process group (not related to FakeTensor) is a process group which
+ doesn't actually do any communication; it just hallucinates some
+ communication. You can run a single rank with a fake process group
+ without needing multiple processes (it simulates per-rank behavior).
+
+ NOTE: This is not a real process group, and it would produce wrong results
+ for every collective. It should be used as a convenient tool when playing
+ with distributed code without caring about the actual data.
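+
+ A minimal usage sketch (single simulated rank, world size of 2):
+ store = FakeStore()
+ dist.init_process_group("fake", rank=0, world_size=2, store=store)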
+ """ + return FakeProcessGroup(rank, world_size) + + +dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda']) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..e9984ba354cee1daa7a5db1b3daaafe7256ce077 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py @@ -0,0 +1,543 @@ +# mypy: allow-untyped-defs + +import sys +import threading +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union +from functools import partial, reduce + +import torch +import torch.distributed as dist +import weakref +from torch._C._distributed_c10d import ( + _create_work_from_future, + AllgatherOptions, + AllreduceOptions, + AllToAllOptions, + BarrierOptions, + BroadcastOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + ReduceOp, +) +from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp +from torch.futures import Future +from torch.utils import _pytree as pytree + +""" +TODO: +Lots of missing collectives. +Collectives validation. +Make timeout robust by making collectives respect the test deadline. +Make tests robust by making collectives interruptible. +We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures. + +""" + + +def flatten_list(lst): + return pytree.tree_leaves(lst) + + +def ret_work(ret): + fut = Future() + fut.set_result(ret) + return _create_work_from_future(fut) + +def binop_reduce(tensors, op): + res = op(torch.stack(tensors), dim=0) + if isinstance(res, torch.Tensor): + return res + # min/max return a namedtuple + return res.values + +def bitwise_reduce(tensors, op): + return reduce(op, tensors) + +_reduce_ops = { + ReduceOp.SUM: partial(binop_reduce, op=torch.sum), + ReduceOp.AVG: partial(binop_reduce, op=torch.mean), + ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod), + ReduceOp.MIN: partial(binop_reduce, op=torch.min), + ReduceOp.MAX: partial(binop_reduce, op=torch.max), + ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and), + ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or), + ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor), +} + +class AllToAll: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_tensor_list, _ = data[dest_rank] + for src_rank in range(world_size): + _, input_tensor_list = data[src_rank] + output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank]) + +class AllToAllBase: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_buffer, _, output_split_sizes, _ = data[dest_rank] + + output_indexes = self._size_cumsum(output_buffer.size(0), output_split_sizes, world_size) + + for src_rank in range(world_size): + _, input_buffer, _, input_split_sizes = data[src_rank] + input_indexes = self._size_cumsum(input_buffer.size(0), input_split_sizes, world_size) + + output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_( + input_buffer[input_indexes[dest_rank]:input_indexes[dest_rank + 1]] + ) + + def _size_cumsum(self, buf_size: int, sizes: Union[torch.Tensor, List[int], None], world_size: int) -> torch.Tensor: + if sizes is None or len(sizes) == 0: + sizes = torch.full( 
+ (world_size,), buf_size // world_size, dtype=torch.int64 + ) + if not isinstance(sizes, torch.Tensor): + sizes = torch.tensor(sizes, dtype=torch.int64) + assert sizes.dtype == torch.int64 + sizes = torch.cumsum( + torch.cat( + ( + torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes + ), + dim=0 + ), + dim=0 + ) + return sizes + +class AllReduce: + def __init__(self, op): + if op.op not in _reduce_ops: + raise NotImplementedError( + f"AllReduce op {op.op} not supported on multithreaded pg for now." + ) + self.op = op.op + + @torch.no_grad() + def work(self, data): + for i in range(len(data[0])): + tensors = [] + # use rank0 as the device for sum + rank_0_device = data[0][i].device + # collect all data to the list and make them + # all on rank 0 device + for src_rank in range(0, len(data)): + tensors.append(data[src_rank][i].to(rank_0_device)) + + # now mimic reduce across all ranks + res = _reduce_ops[self.op](tensors) + + # copy all the reduced value to each rank + for src_rank in range(len(data)): + data[src_rank][i].copy_(res.to(data[src_rank][i].device)) + + +class AllGather: + @torch.no_grad() + def work(self, data): + for src_rank in range(len(data)): + in_tensor_list = data[src_rank][1] + # Can't handle all_gather with multiple tensors + assert len(in_tensor_list) == 1 + src_tensor = in_tensor_list[0] + + for dest in data: + dest_tensor = dest[0][0][src_rank] + dest_tensor.copy_(src_tensor) + + +class Scatter: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + src_in_tensor_list = data[self.src][1] + # Can't handle scatter with multiple input tensor list + assert len(src_in_tensor_list) == 1 + src_in_tensors = src_in_tensor_list[0] + + for rank, each_rank_data in enumerate(data): + out_tensor_list = each_rank_data[0] + # Can't handle scatter with multiple output tensor + assert len(out_tensor_list) == 1 + dest_tensor = out_tensor_list[0] + dest_tensor.copy_(src_in_tensors[rank]) + + +class Gather: + def __init__(self, dst): + self.dst = dst + + @torch.no_grad() + def work(self, data): + # Can't handle gather with multiple tensor lists + assert len(data[self.dst][0]) == 1 + out_tensor_list = data[self.dst][0][0] + for rank, each_rank_data in enumerate(data): + src_in_tensor_list = each_rank_data[1] + # Can't handle gather with multiple tensor lists + assert len(src_in_tensor_list) == 1 + dest_tensor = out_tensor_list[rank] + dest_tensor.copy_(src_in_tensor_list[0]) + +class ReduceScatter: + def __init__(self, op): + if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG: + raise NotImplementedError(f"ReduceScatter does not support {op}") + self.op = op + + @torch.no_grad() + def work(self, data): + start_reduction = [False for _ in range(len(data))] + for each_rank_data in data: + # Can't handle reduce_scatter with multiple scatter list + assert len(each_rank_data[1]) == 1 + to_scatter = each_rank_data[1][0] + for i in range(len(to_scatter)): + dest_tensor_on_rank_i = data[i][0] + # Can't handle reduce_scatter with multiple output tensor + assert len(dest_tensor_on_rank_i) == 1 + dst_tensor_device = dest_tensor_on_rank_i[0].device + if not start_reduction[i]: + dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device)) + start_reduction[i] = True + else: + dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device)) + if self.op == dist.ReduceOp.AVG: + num_ranks = len(data) + for each_rank_data in data: + each_rank_data[0][0] /= num_ranks + + +class Broadcast: + def __init__(self, src): + self.src = src + + 
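+    # Note on the layout of ``data`` here (and in the ``work`` methods of the
+    # collectives above): Collective.join stores one entry per rank, so
+    # ``data[rank]`` is exactly the payload that rank passed in.  For Broadcast
+    # that payload is the rank's tensor list, and ``work`` copies the source
+    # rank's tensors into every rank's list.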
@torch.no_grad() + def work(self, data): + in_tensor_list = flatten_list(data[self.src]) + for i in range(len(data)): + out_tensor_list = flatten_list(data[i]) + for j in range(len(in_tensor_list)): + out_tensor_list[j].copy_(in_tensor_list[j]) + + +class Collective: + def __init__(self, world_size, collective, pg): + self._world_size = world_size + self._collective = collective + + self._start_cond = threading.Condition() + self._done_cond = threading.Condition() + + self._data = [None] * world_size + self._count = 0 + self._done = False + + self._pg = pg + + def join(self, rank, data): + with self._start_cond: + self._data[rank] = data + self._count += 1 + + # notify rank 0 + if self._count == self._world_size: + if rank > 0: + self._start_cond.notify() + + if rank == 0: + self._start_cond.wait_for( + lambda: self._count == self._world_size or self._pg._terminate.is_set() + ) + # SystemExit is not a subclass of Exception but BaseException + # and can be distinguished from normal exception raised from program errors + # so that we can hide it from the exception queue + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + + with self._done_cond: + # wait for rank 0 to finish + if rank > 0: + self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set()) + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + else: + # copy data around + self._collective.work(self._data) + self._done = True + self._done_cond.notify_all() + return ret_work(data) + + +class ProcessLocalGroup(dist.ProcessGroup): + _coll_lock = threading.Lock() + _cur_coll_on_pgs = {} + + _terminate = threading.Event() + + @classmethod + def _start_coll(cls, collective, pg): + with cls._coll_lock: + # pg_name is unique, we use that to record the mapping between pg and collective + if pg.pg_name not in cls._cur_coll_on_pgs: + cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls) + return cls._cur_coll_on_pgs[pg.pg_name] + + @classmethod + def _end_coll(cls, collective, pg): + # This is racily called by all ranks, so only one will work + with cls._coll_lock: + if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective: + cls._cur_coll_on_pgs.pop(pg.pg_name) + + @classmethod + def exception_handle(cls, exc): + cls._terminate.set() + for coll in cls._cur_coll_on_pgs.values(): + with coll._start_cond: + coll._start_cond.notify() + with coll._done_cond: + coll._done_cond.notify_all() + + @classmethod + def reset(cls): + with cls._coll_lock: + cls._cur_coll_on_pgs = {} + cls._terminate.clear() + + def alltoall_base( + self, + output_buffer: torch.Tensor, + input_buffer: torch.Tensor, + output_split_sizes: Optional[List[int]], + input_split_sizes: Optional[List[int]], + opts=AllToAllOptions() + ) -> torch.Tensor: + coll = ProcessLocalGroup._start_coll(AllToAllBase(), self) + res = coll.join(self._rank, (output_buffer, input_buffer, output_split_sizes, input_split_sizes)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()): + coll = ProcessLocalGroup._start_coll(AllToAll(), self) + res = coll.join(self._rank, (output_tensor_list, input_tensor_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + 
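+    # The collective methods of this class all follow the same pattern: every
+    # rank (thread) calls _start_coll to fetch or create the Collective shared
+    # by this pg, join() blocks until all ranks have contributed their data,
+    # rank 0 performs the data movement once on behalf of everyone, and
+    # _end_coll clears the shared entry so the next collective starts fresh.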
def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def barrier(self, opts=BarrierOptions()): + return self.allreduce(tensor_list=[torch.ones(1)]) + + def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()): + coll = ProcessLocalGroup._start_coll(AllGather(), self) + res = coll.join(self._rank, (output_tensors, input_tensor)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()): + tensor_list = list(torch.chunk(output_tensor, self._world_size)) + return self.allgather([tensor_list], [input_tensor], opts) + + def broadcast(self, tensor_list, opts=BroadcastOptions()): + coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def gather(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()): + coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self) + res = coll.join(self._rank, (output_tensor, scatter_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()): + tensor_list = list(torch.chunk(input_tensor, self._world_size)) + return self.reduce_scatter([output_tensor], [tensor_list], opts) + + def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()): + works = [ + self._reduce_scatter_base(output_tensor, input_tensor, opts) + for output_tensor, input_tensor + in zip(output_tensors, input_tensors) + ] + for work in works[:-1]: + work.wait() + return works[-1] + + def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()): + res = None + for o_t, i_t in zip(output_tensor_list, input_tensor_list): + res = self._allgather_base(o_t, i_t) + return res + + def __init__(self, rank, world_size): + super().__init__(rank, world_size) + self._rank = rank + self._world_size = world_size + world = dist.distributed_c10d._world + if isinstance(world, ThreadLocalWorld): + world = world._get_world() + self._world = weakref.ref(world) + self._ctx = torch.autograd.set_multithreading_enabled(False) + + def size(self): + return self._world_size + + @property + def pg_name(self): + """ + return the global registered name of the current pg in the world + """ + return self._world().pg_names[self] + + @property + def group_name(self): + return self.pg_name + + def getBackendName(self): + return "threaded" + + def __repr__(self): + return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}" + + +def _create_threaded_pg(prefix_store, rank, world_size, timeout): + pg = ProcessLocalGroup(rank, world_size) + # https://github.com/pytorch/pytorch/pull/103033 changed store based 
barrier to optional + # When device mesh involves sub groups while store based barrier is not enabled in c10d, + # even though threaded pg actual collectives are assumed to be single threaded, + # different threads may be initializing different groups, + # leading to race conditions. + # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups + # (dim 0 and 1) would be initialized in different threads independently. + # In this case we can no longer rely on class or global variables + # but have to rely on store based barrier to make sure each group + # is ready separately before we can invoke collectives in any of the groups. + + # the prefix store is already per group so we pass an empty name here + _store_based_barrier(rank, prefix_store, "", world_size, timeout) + return pg + + +dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"]) + + +@dataclass +class WorldData: + default_pg: dist.ProcessGroup + pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]] + pg_names: Dict[dist.ProcessGroup, str] + pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]] + pg_backend_config: Dict[dist.ProcessGroup, str] + group_count: int + tags_to_pg: Dict[str, List[dist.ProcessGroup]] + pg_to_tag: Dict[dist.ProcessGroup, str] + pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]] + pg_default_device: Dict[dist.ProcessGroup, torch.device] + + +class ThreadLocalWorld: + _world = threading.local() + + def _get_world(self) -> WorldData: + if not hasattr(ThreadLocalWorld._world, "world"): + ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {}) + return ThreadLocalWorld._world.world + + @property + def default_pg(self): + return self._get_world().default_pg + + @default_pg.setter + def default_pg(self, value): + self._get_world().default_pg = value + + @property + def pg_map(self): + return self._get_world().pg_map + + @property + def pg_names(self): + return self._get_world().pg_names + + @property + def pg_group_ranks(self): + return self._get_world().pg_group_ranks + + @property + def pg_backend_config(self): + return self._get_world().pg_backend_config + + @property + def group_count(self) -> int: + return self._get_world().group_count + + @group_count.setter + def group_count(self, value): + self._get_world().group_count = value + + @property + def tags_to_pg(self): + return self._get_world().tags_to_pg + + @property + def pg_to_tag(self): + return self._get_world().pg_to_tag + + @property + def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]: + return self._get_world().pg_coalesce_state + + @property + def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]: + return self._get_world().pg_default_device + + +_old_pg_world = None +_ctx_manager = None + + +def _install_threaded_pg(): + global _old_pg_world + global _ctx_manager + _old_pg_world = dist.distributed_c10d._world + dist.distributed_c10d._world = ThreadLocalWorld() + _ctx_manager = torch.autograd.set_multithreading_enabled(False) + + return dist.distributed_c10d._world + + +def _uninstall_threaded_pg(): + dist.distributed_c10d._world = _old_pg_world diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8148d80a1948964747edf0a4ccdd97acaba316db Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63be3e8ed40ef1508d037038967f5024c5fa5e68 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/tensorpipe_rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6b9a843b6294f316ebd91180c8ecb9ddf69524 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py @@ -0,0 +1,2783 @@ +# mypy: allow-untyped-defs + +import sys +import threading +import time +from enum import Enum +import random +import torch +import torch.nn as nn +from datetime import timedelta +import torch.distributed as dist +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.testing._internal.dist_utils +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributed.rpc import RRef +from torch.testing._internal.common_utils import IS_MACOS, skip_but_pass_in_sandcastle_if +from torch.testing._internal.dist_utils import ( + dist_init, + initialize_pg, + wait_until_node_failure, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import skip_if_lt_x_gpu + + +# Right now we test up to 3-layer nested rpc calls. +# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id +# sent from prev rank respectively. +# rpc_done[2] and ctx_ids[2] represents for prev of prev rank. +# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank. +# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used. 
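+# For example, in _test_graph rank r runs
+#   rpc.rpc_sync(worker_name((r + 1) % world_size), _set_rpc_done, args=(context_id, 1))
+# so on rank r + 1, rpc_done[1] becomes True and ctx_ids[1] holds rank r's
+# context id.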
+rpc_done = [False, False, False, False] +ctx_ids = [-1, -1, -1, -1] + +known_context_ids = set() + +requires_grad_tensor = torch.ones(3, 3, requires_grad=True) + +# Send rpc done info and context_id to +# dst_rank = (self.rank + rank_distance) % self.world_size +# we don't need a lock here since the GIL is held while executing remote +# python UDFs, so access is serialized across several workers. +def _set_rpc_done(ctx_id, rank_distance): + global rpc_done + global ctx_ids + global known_context_ids + rpc_done[rank_distance] = True + ctx_ids[rank_distance] = ctx_id + known_context_ids.add(ctx_id) + + +def _check_rpc_done(rank_distance): + while not rpc_done[rank_distance]: + time.sleep(0.1) + + +def _torch_ones(sizes, requires_grad=False): + return torch.ones(sizes, requires_grad=requires_grad) + +# This method must be called on the rref owner, and verifies that the grad of +# rref tensor equals to the given grad. +def _compare_owner_value(context_id, rref, grad): + grads = dist_autograd.get_gradients(context_id) + x = grads[rref.local_value()] + if x.is_sparse: + assert grad.is_sparse + x = x.to_dense() + grad = grad.to_dense() + else: + assert not grad.is_sparse + return torch.equal(x, grad) + + +def create_tensor(): + return torch.ones((3, 3), requires_grad=True) + + +def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32): + i = [[0, 1, 1], [2, 0, 2]] + v = [3.2, 4.1, 5.3] + tensor = torch.sparse_coo_tensor( + i, v, (3, 3), requires_grad=requires_grad, dtype=dtype + ) + if coalesce: + tensor = tensor.coalesce() + return tensor + + +@torch.jit.script +def create_torchscript_tensor() -> torch.Tensor: + return torch.ones((3, 3)).requires_grad_() + + +def my_py_add(t1, t2): + return torch.add(t1, t2) + + +def my_scalar_add(a, b): + return a + b + + +def my_rref_add(rref_t1, t2): + ret = torch.add(rref_t1.local_value(), t2) + return ret + + +@torch.jit.script +def my_script_add(t1, t2): + return torch.add(t1, t2) + + +@torch.jit.script +def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor: + t1 = ref_t1.to_here() + return torch.add(t1, t2) + + +def my_nested_rref_add(dst, rref_t1, t2): + return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2)) + + +def ret_requires_grad(): + return requires_grad_tensor + + +def my_py_nested_call(t1, t2, dst, world_size, hops): + next_dst = (dst + 1) % world_size + if hops > 0: + return rpc.rpc_sync( + worker_name(next_dst), + my_py_nested_call, + args=(t1, t2, next_dst, world_size, hops - 1), + ) + else: + return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2)) + + +# after dist autograd context is cleaned up, it should be cleaned up on other +# nodes. This helper allows timeout_seconds for those RPCs to be completed, and +# ensures that all the contexts have been cleaned up in that timeframe.any +def _all_contexts_cleaned_up(timeout_seconds=10): + global known_context_ids + start = time.time() + context_id_to_raised = set() + while ( + time.time() - start < timeout_seconds + and context_id_to_raised != known_context_ids + ): + for context_id in known_context_ids: + try: + dist_autograd._retrieve_context(context_id) + except RuntimeError: + context_id_to_raised.add(context_id) + # all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError. 
+ success = context_id_to_raised == known_context_ids + return success + + +# This function creates a dis autograd context, run rpc_sync on the given ps, +# and then blocks until the ps has verified the grads are correctly accumulated. +def _run_trainer(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + +# This function is the same as _run_trainer, except rpc calls torchscript +# function "my_script_ref_add" instead of python function "my_rref_add" +def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2)) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + # prevent deleting dist autograd context + rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff)) + rpc.rpc_sync(ps, _check_rpc_done, args=(0,)) + + +class SimulateBackwardError(Function): + _simulate_error = True + + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + if SimulateBackwardError._simulate_error: + raise Exception("Simulate error on backward pass") # noqa: TRY002 + else: + return input + + +class ExecMode(Enum): + LOCAL = 1 # Run the operation locally. + RPC_SYNC = 2 # Run the operation using rpc_sync + REMOTE = 3 # Run the operation using remote. + RPC_ASYNC = 4 # Run the operation using rpc_async + + +# Common utils for both CPU and CUDA test suites +class CommonDistAutogradTest(RpcAgentTestFixture): + def _exec_func_with_dst(self, dst, exec_mode, method, *args): + if ExecMode.LOCAL == exec_mode: + if len(args) == 1 and isinstance(args[0], list): + return method(*args[0]) + return method(*args) + elif ExecMode.RPC_SYNC == exec_mode: + return rpc.rpc_sync(worker_name(dst), method, args=(args)) + elif ExecMode.REMOTE == exec_mode: + return rpc.remote(worker_name(dst), method, args=(args)).to_here() + elif ExecMode.RPC_ASYNC == exec_mode: + fut = rpc.rpc_async(worker_name(dst), method, args=(args)) + return fut.wait() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + def _exec_func(self, exec_mode, method, *args): + return self._exec_func_with_dst( + self._next_rank(), exec_mode, method, *args + ) + + def _next_rank(self): + if hasattr(self, "dst_rank"): + self.dst_rank = (self.dst_rank + 1) % self.world_size + if self.dst_rank == self.rank: + return self._next_rank() + else: + self.dst_rank = (self.rank + 1) % self.world_size + return self.dst_rank + + def _check_rpc_done(self, rank_distance): + _check_rpc_done(rank_distance) + + def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args): + if exec_mode == ExecMode.LOCAL: + torch.autograd.backward(tensors) + return [arg.grad for arg in args] + else: + self._verify_backwards_remote(tensors, context_id, local_grads, *args) + + def _verify_backwards_remote(self, tensors, context_id, local_grads, *args): + dist_autograd.backward(context_id, tensors) + + # Verify grads were accumulated appropriately. 
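+        # Every arg whose locally computed grad is not None must appear in the
+        # dist autograd gradients map with an equal value; args without a local
+        # grad must be absent, and the map must contain nothing else.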
+ grads = dist_autograd.get_gradients(context_id) + nargs = len(args) + ngrads = 0 + for i in range(0, nargs): + if local_grads[i] is not None: + self.assertIn(args[i], grads) + self.assertEqual(local_grads[i], grads[args[i]]) + ngrads += 1 + else: + self.assertNotIn(args[i], grads) + + self.assertEqual(ngrads, len(grads)) + + def _test_graph(self, fn, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor() + t2 = build_sparse_tensor() + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2)) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), fn, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Verify graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # Verify graph for previous context id. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # autograd context should be cleaned up by now. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._retrieve_context(context_id) + + # No autograd context available. + with self.assertRaises(RuntimeError): + ctx = dist_autograd._current_context() + + # 3-layer nested calls + def _test_graph_for_py_nested_call(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + nest_dst_rank = (dst_rank + 1) % self.world_size + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=(t1, t2, dst_rank, self.world_size, 1), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + # Barrier to ensure all RPCs are done. + dist.barrier() + + for rd in [1, 2, 3]: + rpc.rpc_sync( + worker_name((self.rank + rd) % self.world_size), + _set_rpc_done, + args=(context_id, rd), + ) + + # Barrier to ensure all set_rpc_done have completed. + dist.barrier() + + # For self.rank, it has 4 graphs to verify + # One is for current context id when this rank send first rpc call. 
+ # Second one is for prev context id when this rank make 1st nested + # call. + # Third one is for prev prev context id when this rank make + # 2nd nested call. + # Last one is for prev prev prev context id when this rank + # execute the torch.add() operator. + + # Verify first graph for current context id. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(1, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + next(iter(recv_functions.values())), + t1, + t2, + ret, + ) + + # Verify second graph for 1st nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + + # Verify third graph for 2nd nested call. + ctx = dist_autograd._retrieve_context(ctx_ids[2]) + self._verify_graph_for_nested_rpc_call(ctx) + + # verify last graph for rpc call execution. + ctx = dist_autograd._retrieve_context(ctx_ids[3]) + send_functions = ctx._send_functions() + self.assertEqual(1, len(send_functions)) + self._verify_graph_for_rpc_call_exec(next(iter(send_functions.values()))) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. + dist.barrier() + + # Rank0->Rank1->Rank0 + def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse): + dst_rank = (self.rank + 1) % self.world_size + + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), + my_py_nested_call, + args=( + t1, + t2, + (self.rank - 1 + self.world_size) % self.world_size, + self.world_size, + 0, + ), + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name((self.rank + 1) % self.world_size), + _set_rpc_done, + args=(context_id, 1), + ) + + # For self.rank, it has 2 graphs to verify. + # One is for current context id when this rank send first rpc + # call and execute the torch.add() operator. + # Another one is for prev context id when this rank make + # nested call. + ctx = dist_autograd._current_context() + self.assertEqual(context_id, ctx._context_id()) + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + recv_functions = ctx._recv_functions() + self.assertEqual(2, len(recv_functions)) + self._verify_graph_for_first_rpc_call( + next(iter(send_functions.values())), + list(recv_functions.values())[1], + t1, + t2, + ret, + ) + self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1]) + + # Verify two pairs of send and recv functions for nested + # call + self._check_rpc_done(1) + ctx = dist_autograd._retrieve_context(ctx_ids[1]) + self._verify_graph_for_nested_rpc_call(ctx) + # this barrier is needed so one worker does not clean up their + # autograd context before another worker tries to access it. 
+ dist.barrier() + + def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if sparse: + t1 = build_sparse_tensor(requires_grad=False) + t2 = build_sparse_tensor(requires_grad=False) + else: + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.add, args=(t1, t2) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + ctx = dist_autograd._current_context() + send_functions = ctx._send_functions() + self.assertEqual(len(send_functions), 0) + recv_functions = ctx._recv_functions() + self.assertEqual(len(recv_functions), 0) + + # Wait for the prev rank to be done with rpc. + self._check_rpc_done(1) + # NB: RRef.to_here() always passes the autograd context to the + # the callee, as the caller does not know whether the return + # value would contain a requires_grad tensor or not. + # + # rpc/remote with udf (_set_rpc_done here) also always passes the + # autograd context to the callee due to the same reason. + self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1])) + dist.barrier() + + def _test_rpc_complex_args(self, exec_mode, sparse): + with dist_autograd.context() as context_id: + num_tensors = 10 + tensors = [] + for i in range(num_tensors): + if sparse: + tensor = build_sparse_tensor(requires_grad=(i % 2 == 0)) + else: + tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0)) + tensors.append(tensor) + dst_rank = self._next_rank() + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.stack, args=(tensors,) + ) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), torch.stack, args=(tensors,) + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + self.assertEqual(torch.stack(tensors), ret) + + # Verify appropriate tensors have been attached the autograd graph. + next_funcs = next(iter(dist_autograd._current_context()._send_functions().values())).next_functions + idx = 0 + for i in range(len(next_funcs)): + self.assertEqual( + "torch::autograd::AccumulateGrad", next_funcs[i][0].name() + ) + self.assertEqual(tensors[i], next_funcs[i][0].variable) + + # Verify that the worker id has been recorded in the context + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(len(worker_ids), 1) + self.assertEqual(worker_ids, {dst_rank}) + + def context_cleanup_test_helper(self, rpc_args, func, nested=False): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. 
+ if nested: + dst_rank = (self.rank + 1) % self.world_size + nested_dst_rank = (dst_rank + 1) % self.world_size + dst_ranks = {dst_rank} + else: + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + if nested: + rpc.rpc_sync( + worker_name(nested_dst_rank), + _set_rpc_done, + args=(context_id, 2), + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + def _backward_no_grad_on_tensor(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + self.assertIsNone(t1.grad) + self.assertIsNone(t2.grad) + + # Now populate .grad with local autograd engine and + # verify dist autograd doesn't mess with it. + loss_local = torch.add(t1, t2) + if sparse: + loss_local = torch.sparse.sum(loss_local) + else: + loss_local = loss_local.sum() + loss_local.backward() + self.assertIsNotNone(t1.grad) + self.assertIsNotNone(t2.grad) + + t1_grad_before = t1.grad + t2_grad_before = t2.grad + dist_autograd.backward(context_id, [loss]) + self.assertEqual(t1_grad_before, t1.grad) + self.assertEqual(t2_grad_before, t2.grad) + + # The current rank first creates a tensor on the rref_owner, and then passes + # the rref with another tensor to the callee to run either my_rref_add or + # my_nested_rref_add, depending on whether the callee is the rref owner. + # The grad of tensor lives on the current rank, and the grad of the rref + # tensor lives on the rref owner. + def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse): + local_ret = torch.add(t1, t2) + if sparse: + local_ret = torch.sparse.sum(local_ret) + else: + local_ret = local_ret.sum() + local_ret.backward() + with dist_autograd.context() as context_id: + if sparse: + rref_t1 = rpc.remote( + rref_owner, build_sparse_tensor, args=(False, True,) + ) + else: + rref_t1 = rpc.remote( + rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True} + ) + if callee == rref_owner: + rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2)) + else: + rref = rpc.remote( + callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2) + ) + ret = rref.to_here() + if sparse: + ret = torch.sparse.sum(ret) + else: + ret = ret.sum() + dist_autograd.backward(context_id, [ret]) + + # verify grads on caller + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t2, grads) + self.assertEqual(grads[t2], t2.grad) + + # verify grads on rref owner + self.assertTrue( + rpc.rpc_sync( + rref_owner, + _compare_owner_value, + args=(context_id, rref_t1, t1.grad), + ) + ) + + # In this test, every rank will serve as a parameter server (ps) and a + # driver, and then kicks off trainers on the other three ranks. 
So, we have: + # ps = rank0 with trainers = rank1/2/3 + # ps = rank2 with trainers = rank2/3/0 + # ps = rank3 with trainers = rank3/0/1 + # ps = rank4 with trainers = rank0/1/2 + # + # These four test ps-trainer groups run on completely separate autograd + # graphs, but they share the same set of underlying RpcAgents. + def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse): + if sparse: + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + else: + t1 = torch.ones((3, 3), requires_grad=True) + t2 = torch.zeros((3, 3), requires_grad=True) + + local_ret = torch.add(t1, t2) + if sparse: + torch.sparse.sum(local_ret).backward() + else: + local_ret.sum().backward() + + # create rref on self + rref_t1 = rpc.remote( + worker_name(self.rank), + create_ref_fn, + args=()) + + # kick off forward and backward pass on three other workers (trainers) + rank_diffs = [1, 2, 3] + futures = [] + for rank_diff in rank_diffs: + futures.append( + rpc.rpc_async( + worker_name((self.rank + rank_diff) % self.world_size), + trainer_fn, + args=(rref_t1, t2, worker_name(self.rank), rank_diff, sparse), + ) + ) + + # check if the trainers have done with their backward pass + for rank_diff in rank_diffs: + self._check_rpc_done(rank_diff) + + # trainers are done and holding the context for verification + accumulate_grad_func = None + for rank_diff in rank_diffs: + # make sure grads are accumulated for the same tensors and values + # are all correct + ctx_id = ctx_ids[rank_diff] + grads = dist_autograd.get_gradients(ctx_id) + local_t1 = rref_t1.to_here() + self.assertIn(local_t1, grads) + self.assertEqual(grads[local_t1], t1.grad) + + # unblock trainers + _set_rpc_done(None, 0) + + # wait until all trainers are done + torch.futures.wait_all(futures) + + def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + # Multiple RPCs between different nodes. + val = self._exec_func(exec_mode, torch.add, t1, t2) + val = self._exec_func(exec_mode, torch.mul, t3, val) + s1 = self._exec_func(exec_mode, torch.stack, (t4, val)) + s2 = self._exec_func(exec_mode, torch.stack, (t5, val)) + if sparse: + val = self._exec_func(exec_mode, torch.mul, s1, s2) + val = self._exec_func(exec_mode, torch.mul, val, val) + loss = torch.sparse.sum(val) + else: + val = self._exec_func(exec_mode, torch.bmm, s1, s2) + val = self._exec_func(exec_mode, torch.matmul, val, val) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5 + ) + local_grads = ret if ret else local_grads + + def _backward_different_dtypes(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + loss = self._exec_func(exec_mode, torch.add, t1, t2) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. 
+ def _backward_simple_python_udf(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, my_py_add, t1, t2) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple_script_call(self, t1, t2, sparse): + local_grads = None + for exec_mode in [ + ExecMode.LOCAL, + ExecMode.RPC_SYNC, + ExecMode.RPC_ASYNC, + ExecMode.REMOTE, + ]: + with dist_autograd.context() as context_id: + forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2) + if sparse: + loss = torch.sparse.sum(forward_ret) + else: + loss = forward_ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + def _nested_backward_accumulate_grads(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + ret = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._test_nested_backward_accumulate_grads, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + # Run backward twice. + dist_autograd.backward(context_id, [loss], retain_graph=True) + dist_autograd.backward(context_id, [loss]) + + def _backwards_nested_python_udf(self, t1, t2, sparse): + t3 = t1 * t2 + t4 = t1 + t2 + res = t3 + t4 + loss = t1 * t2 * t3 * t4 * res + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + torch.autograd.backward([loss]) + + # Now run distributed autograd. + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_python_udf, + args=(t1, t2, self._next_rank()), + ) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + dist_autograd.backward(context_id, [loss]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + def _mixed_requires_grad(self, t1, t2, sparse): + for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2 + ) + self.assertEqual(t1 * t2, ret) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + dist_autograd.backward(context_id, [loss]) + self.assertTrue(t1.requires_grad) + self.assertFalse(t2.requires_grad) + grads = dist_autograd.get_gradients(context_id) + self.assertIn(t1, grads) + self.assertNotIn(t2, grads) + self.assertEqual(t2, grads[t1]) + + def _multiple_backward(self, t1, t2, sparse): + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + torch.add, + args=(t1, t2)) + if sparse: + loss = torch.sparse.sum(loss) + else: + loss = loss.sum() + # Run backward in a loop multiple times. + for i in range(1000): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # For current context, this rank sends t1 and t2 tensors to dst_rank, + # then get t3 = torch.add(t1, t2) result tensor. 
+ # For the current context in this rank, it expects graph like this: + # send function: + # rpcSendBackward + # / \ + # t1.AccumulateGrad t2.AccumulateGrad + # + # recv function: + # + # | + # t3.rpcRecvBackward + # + def _verify_graph_for_first_rpc_call( + self, send_function, recv_function, t1, t2, ret + ): + # Retrieve the next functions in the graph. + next_funcs = send_function.next_functions + self.assertEqual(2, len(next_funcs)) + + # We should now hit t1 and t2 in the autograd graph. + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name()) + self.assertEqual(t1, next_funcs[0][0].variable) + self.assertEqual(0, next_funcs[0][1]) + self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name()) + self.assertEqual(t2, next_funcs[1][0].variable) + self.assertEqual(0, next_funcs[1][1]) + + # Test recv functions. + self.assertEqual(ret.grad_fn, recv_function) + + # Run the same code locally and with dist autograd and verify gradients + # are same. + def _backward_simple(self, dst, t1, t2, local_grads, sparse): + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func_with_dst( + dst, exec_mode, torch.add, t1, t2 + ) + if sparse: + loss = torch.sparse.sum(ret) + else: + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends + # result tensor t3 back. + # For this context in this rank, it expects graph like this: + # send and recv functions: + # rpcSendBackward + # | + # t3.AddBackward0 + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + def _verify_graph_for_rpc_call_exec(self, send_function): + # Verify next function is AddBackward0 + next_funcs = send_function.next_functions + self.assertEqual(1, len(next_funcs)) + add_backward_fn = next_funcs[0][0] + self.assertEqual("AddBackward0", add_backward_fn.name()) + + # Verify the next two functions are the same recv backward function. + next_funcs = add_backward_fn.next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For a context passed from previous nested chain calls, this rank + # receives two tensors t1 and t2, forwards t1 and t2 tensors using + # nested rpc call to next dst. In return route, receive result tensor t3 + # from next dst and forwarding t3 back to previous calls. 
+ # For this context in this rank, it expects graph like this: + # send and recv functions for receiving and forwarding t1 and t2: + # rpcSendBackward + # / \ + # t1.recvRpcBackward t2.recvRpcBackward + # send and recv functions for receiving and forwarding t3: + # rpcSendBackward + # | + # t3.recvRpcBackward + def _verify_graph_for_nested_rpc_call(self, ctx): + send_functions = ctx._send_functions() + self.assertEqual(2, len(send_functions)) + + # For send function when making nest rpc call, + # next functions of the send function are two recv functions + # for received two tensors from previous call + next_funcs = next(iter(send_functions.values())).next_functions + self.assertEqual(2, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name() + ) + self.assertEqual(next_funcs[0][0], next_funcs[1][0]) + + # For send function when returning response to previous call + # next function of the send function is the recv function + # for received tensor result returned from nested call + next_funcs = list(send_functions.values())[1].next_functions + self.assertEqual(1, len(next_funcs)) + self.assertEqual( + "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name() + ) + + +class TensorPipeAgentDistAutogradTest(CommonDistAutogradTest): + + # Sparse tests only work with TensorPipeAgent. + @dist_init + def test_graph_for_builtin_call_sparse(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_python_call_sparse(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_builtin_remote_call_sparse(self): + self._test_graph(torch.add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_python_remote_call_sparse(self): + self._test_graph(my_py_add, ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_sparse(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, True) + + @dist_init + def test_graph_for_py_nested_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, True) + + @dist_init + def test_graph_for_py_nested_remote_call_itself_sparse(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, True) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote_sparse(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, True) + + @dist_init + def test_rpc_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, True) + + @dist_init + def test_remote_complex_args_sparse(self): + self._test_rpc_complex_args(ExecMode.REMOTE, True) + + @dist_init + def test_context_cleanup_tensor_with_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad_sparse(self): + t1 = build_sparse_tensor(requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_nested_rpc_sparse(self): + t1 = 
build_sparse_tensor(requires_grad=True) + t2 = build_sparse_tensor(requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_backward_no_grad_on_tensor_sparse(self): + self._backward_no_grad_on_tensor( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_sparse(self): + self._backward_simple( + self._next_rank(), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_simple_self_sparse(self): + self._backward_simple( + self.rank, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_multi_sparse(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_sparse(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_rref_nested_sparse(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_trainer_ps_sparse(self): + self._test_trainer_ps( + build_sparse_tensor, + _run_trainer, + True + ) + + @dist_init + def test_backward_multiple_round_trips_sparse(self): + self._backward_multiple_round_trips( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + build_sparse_tensor(requires_grad=True), + None, + True + ) + + @dist_init + def test_backward_different_dtypes_sparse(self): + self._backward_different_dtypes( + build_sparse_tensor(requires_grad=True, dtype=torch.float32), + build_sparse_tensor(requires_grad=True, dtype=torch.float64), + True + ) + + @dist_init + def test_backward_simple_python_udf_sparse(self): + self._backward_simple_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backward_simple_script_call_sparse(self): + self._backward_simple_script_call( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_nested_backward_accumulate_grads_sparse(self): + self._nested_backward_accumulate_grads( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_backwards_nested_python_udf_sparse(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_mixed_requires_grad_sparse(self): + self._mixed_requires_grad( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=False), + True + ) + + @dist_init + def test_multiple_backward_sparse(self): + self._multiple_backward( + build_sparse_tensor(requires_grad=True), + build_sparse_tensor(requires_grad=True), + True + ) + + @dist_init + def test_embedding_bag_with_no_grad_tensors(self): + dst = self._next_rank() + remote_embedding = rpc.remote( + worker_name(dst), + torch.nn.EmbeddingBag, + args=(16, 16), + kwargs={"mode": "sum", "sparse": True}, + ) + local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True) + + input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) + # requires_grad = True to record send/recv functions + per_sample_weights = torch.rand((8), requires_grad=True) + offsets = torch.LongTensor([0, 4]) + + local_res = local_embedding(input, offsets, per_sample_weights) + + # Run backward twice. + torch.autograd.backward([local_res.sum()], retain_graph=True) + torch.autograd.backward([local_res.sum()]) + local_grad = local_embedding.weight.grad + + with dist_autograd.context() as context_id: + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._call_remote_embedding, + args=(remote_embedding, input, offsets, per_sample_weights), + ) + + # Run backward twice to test accumulation of sparse gradients. + dist_autograd.backward(context_id, [res.sum()], retain_graph=True) + dist_autograd.backward(context_id, [res.sum()]) + + remote_grad = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._get_grad, + args=(remote_embedding, context_id), + ) + + self.assertEqual(local_grad, remote_grad) + + +class DistAutogradTest(CommonDistAutogradTest): + @dist_init + def test_autograd_context(self): + # Verify max possible id. + max_auto_increment = 281474976710655 + self.assertEqual( + max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id() + ) + + context_ids = [] + for i in range(200): + with dist_autograd.context() as context_id: + self.assertEqual( + context_id, + dist_autograd._retrieve_context(context_id)._context_id(), + ) + # First 16 bits should be worker_id. + self.assertEqual(self.worker_id, context_id >> 48) + context_ids.append(context_id) + + for context_id in context_ids: + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd._retrieve_context(context_id) + + @dist_init + def test_nested_context(self): + with dist_autograd.context() as context_id: + # Nested contexts not supported. 
+ with self.assertRaisesRegex( + RuntimeError, "Already have an autograd context id for this thread" + ): + with dist_autograd.context() as context_id: + pass + + @dist_init + def test_graph_for_builtin_call(self): + self._test_graph(torch.add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_python_call(self): + self._test_graph(my_py_add, ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_builtin_remote_call(self): + self._test_graph(torch.add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_python_remote_call(self): + self._test_graph(my_py_add, ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call(self): + self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call(self): + self._test_graph_for_py_nested_call(ExecMode.REMOTE, False) + + @dist_init + def test_graph_for_py_nested_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC, False) + + @dist_init + def test_graph_for_py_nested_remote_call_itself(self): + self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC, False) + + @dist_init + def test_no_graph_with_tensors_not_require_grad_remote(self): + self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE, False) + + def _test_grad_only_on_return_value(self, exec_mode): + initialize_pg(self.file_init_method, self.rank, self.world_size) + dst_rank = (self.rank + 1) % self.world_size + with dist_autograd.context() as context_id: + if ExecMode.RPC_SYNC == exec_mode: + ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad) + elif ExecMode.REMOTE == exec_mode: + ret = rpc.remote( + worker_name(dst_rank), ret_requires_grad + ).to_here() + else: + raise ValueError(f"Unrecognized ExecMode {exec_mode}") + + dist_autograd.backward(context_id, [ret.sum()]) + + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + + # Wait for the prev rank to be done with rpc. 
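+ # (_set_rpc_done, defined earlier in this file, appears to record the caller's + # context id in the module-level ctx_ids list keyed by rank distance; + # _check_rpc_done(1) waits on that flag, so ctx_ids[1] below refers to the + # previous rank's context.)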
+ self._check_rpc_done(1) + grads = dist_autograd.get_gradients(ctx_ids[1]) + self.assertEqual(1, len(grads)) + self.assertIn(requires_grad_tensor, grads) + self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor]) + # due to the above get_gradients call, ensure that dist autograd + # contexts aren't cleaned up until all workers exit context managers + dist.barrier() + + @dist_init + def test_grad_only_on_return_value(self): + self._test_grad_only_on_return_value(ExecMode.RPC_SYNC) + + @dist_init + def test_grad_only_on_return_value_remote(self): + self._test_grad_only_on_return_value(ExecMode.REMOTE) + + @dist_init + def test_rpc_complex_args(self): + self._test_rpc_complex_args(ExecMode.RPC_SYNC, False) + + @dist_init + def test_remote_complex_args(self): + self._test_rpc_complex_args(ExecMode.REMOTE, False) + + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_context_cleanup_tensor_no_grad(self): + t1 = torch.ones(3, 3, requires_grad=False) + self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add) + + @dist_init + def test_context_cleanup_no_tensors(self): + self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add) + + @dist_init + def test_context_cleanup_nested_rpc(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + dst_rank = (self.rank + 1) % self.world_size + args = (t1, t2, dst_rank, self.world_size, 0) + self.context_cleanup_test_helper( + rpc_args=args, func=my_py_nested_call, nested=True + ) + + @dist_init + def test_worker_ids_recorded(self): + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + with dist_autograd.context() as context_id: + # if no tensors require grad, we should still record worker_ids, as + # the autograd context ID is still passed to other workers. + t1 = torch.ones(3, 3, requires_grad=False) + t2 = torch.zeros(3, 3, requires_grad=False) + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. + ctx = dist_autograd._current_context() + worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + # worker_ids should be recorded when tensors do require grad + t1.requires_grad = True + t2.requires_grad = True + for dst_rank in dst_ranks: + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(t1, t2) + ) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # all worker_ids in dst_ranks should be recorded. 
+ worker_ids = ctx._known_worker_ids() + self.assertEqual(worker_ids, dst_ranks) + + @dist_init + def test_dist_autograd_profiling(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(3, 3, requires_grad=True) + loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum() + with torch.autograd.profiler.profile() as p: + dist_autograd.backward(context_id, [loss]) + + function_events = p.function_events + + def get_event(partial_key): + return next(event for event in function_events if partial_key in event.name) + + send_event = get_event("SendRpcBackward") + recv_event = get_event("RecvRpcBackward") + backward_event = get_event("torch::distributed::autograd::backward") + # There should be at least 1 send and recv_events each, corresponding to send/recv functions executed. + self.assertEqual(send_event.count, 1) + self.assertEqual(recv_event.count, 1) + # The CPU total for backward event should be great than send and recv, since + # applying those functions in the backwards pass is a subset of the entire backward pass. + self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total) + self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total) + + @dist_init + def test_error_in_context(self): + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(6, 6, requires_grad=True) + + with self.assertRaises(RuntimeError): + # This should throw an error since matrix sizes don't match. + rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + + @dist_init + def test_backward_no_grad_on_tensor(self): + self._backward_no_grad_on_tensor( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + False + ) + + @dist_init + def test_backward_simple(self): + self._backward_simple( + self._next_rank(), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_simple_self(self): + self._backward_simple( + self.rank, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref(self): + callee = worker_name(self._next_rank()) + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_multi(self): + if self.rank > 0: + callee = "worker0" + rref_owner = callee + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_rref_nested(self): + callee = worker_name((self.rank + 1) % self.world_size) + rref_owner = worker_name((self.rank + 2) % self.world_size) + self._backward_rref( + callee, + rref_owner, + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_trainer_ps(self): + self._test_trainer_ps( + create_tensor, + _run_trainer, + False + ) + + @dist_init + def test_trainer_ps_torchscript_functions(self): + # TODO, need more investigation + # there is rref leak when shutting down, suspect it is because + # ref as arg is passed to pybind boundary, and the ref is not garbage + # collected by python when calling shutdown() + import torch.distributed.rpc.api as api + api._ignore_rref_leak = 
True + + self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False) + + @dist_init + def test_backward_multiple_round_trips(self): + self._backward_multiple_round_trips( + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + torch.rand((3, 3)), + torch.rand((3, 3), requires_grad=True), + None, + False + ) + + @dist_init + def test_backward_different_tensor_dims(self): + local_grads = None + t1 = torch.rand((4, 6), requires_grad=True) + t2 = torch.rand((6, 5)) + t3 = torch.rand((5, 7), requires_grad=True) + t4 = torch.rand((7, 9)) + + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + val = self._exec_func(exec_mode, torch.matmul, t1, t2) + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4)) + loss = val.sum() + + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_unused_tensors(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3)) + val = self._exec_func( + exec_mode, + torch.matmul, + torch.narrow(s, 0, 0, 1), + torch.narrow(s, 0, 2, 1), + ) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2, t3 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_backward_multiple_output_tensors(self): + local_grads = None + t = torch.rand((10, 2), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + tensor_list = self._exec_func(exec_mode, torch.split, t, 2) + t1 = tensor_list[0] + t2 = tensor_list[2] + t3 = tensor_list[4] + + val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3)) + + loss = val.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t + ) + local_grads = ret if ret else local_grads + + def _run_test_backward_unused_send_function_in_thread(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + # We don't use the result of an RPC function, as a result the + # backward pass would hang in the "FAST" mode. + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + val = torch.mul(t1, t2) + + # Run backward, this would hang forever. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init + def test_backward_unused_send_function(self): + # Run the test in a thread which would never finish. + t = threading.Thread( + target=self._run_test_backward_unused_send_function_in_thread + ) + t.daemon = True + t.start() + t.join(10) # Wait for 10s. + + # Verify thread is still alive (indicating backward hasn't completed yet). + self.assertTrue(t.is_alive()) + + @dist_init + def test_backward_autograd_engine_error(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + # Perform some ops before error simulation. 
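+ # (SimulateBackwardError, defined elsewhere in this test suite, is expected to + # raise from its backward(), so the failure only surfaces once the distributed + # backward pass reaches this node.)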
+ tmp = (t1 + t2) * (t1 + t2) + t3 = SimulateBackwardError.apply(tmp) + + # Run multiple round trips across different nodes and verify the + # original node receives an error thrown on a node deep in the chain. + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t2, t3) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.mul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(val, t2) + ) + val = rpc.rpc_sync( + worker_name(self._next_rank()), torch.div, args=(val, t2) + ) + + with self.assertRaisesRegex( + RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass" + ): + # Run backwards, and validate we receive an error. + dist_autograd.backward(context_id, [val.sum()]) + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure(self): + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + + # Wait for all RPCs to be done. + dist.barrier() + + # Kill all odd rank nodes. + if self.rank % 2 == 0: + shutdown_error_regex = self.get_shutdown_error_regex() + # Wait for all other nodes to die. + for rank in range(self.world_size): + if rank % 2 != 0: + wait_until_node_failure(rank, shutdown_error_regex) + + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex() + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since all + # other nodes are dead. + dist_autograd.backward(context_id, [res.sum()]) + else: + # Exit all other nodes. 
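+ # Odd ranks simply fall through and return; since this test uses + # dist_init(clean_shutdown=False), their RPC agents are torn down abruptly, + # which is exactly the failure the even ranks above are waiting to observe.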
+ pass + + @dist_init + def test_backward_without_context(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + res = rpc.rpc_sync( + worker_name(self._next_rank()), torch.add, args=(t1, t2) + ) + dist_autograd.backward(context_id, [res.sum()]) + + @dist_init + def test_backward_without_rpc(self): + dst_rank = self.rank + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.add(t1, t2) + + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(torch.ones(3, 3), grads[t1]) + self.assertEqual(torch.ones(3, 3), grads[t2]) + + @dist_init + def test_backward_invalid_args(self): + with dist_autograd.context() as context_id: + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(context_id, None) + + with self.assertRaisesRegex(TypeError, "incompatible function arguments"): + dist_autograd.backward(None, None) + + with self.assertRaisesRegex( + RuntimeError, "No tensors provided for gradient computation" + ): + dist_autograd.backward(context_id, []) + + with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"): + t = torch.rand(3, 3) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "is not a scalar, all roots need to be scalar" + ): + t = torch.rand(3, 3, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + with self.assertRaisesRegex( + RuntimeError, "does not have a valid gradient function" + ): + t = torch.rand(1, requires_grad=True) + dist_autograd.backward(context_id, [t]) + + @dist_init + def test_backward_multiple_roots(self): + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum() + r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum() + r3 = self._exec_func(exec_mode, torch.cos, t1).sum() + r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum() + + local_grads = self._verify_backwards( + exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2 + ) + + @dist_init + def test_backward_different_dtypes(self): + self._backward_different_dtypes( + torch.rand((3, 3), requires_grad=True, dtype=torch.float32), + torch.rand((3, 3), requires_grad=True, dtype=torch.float64), + False + ) + + @dist_init + def test_backward_simple_python_udf(self): + self._backward_simple_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_backward_simple_script_call(self): + self._backward_simple_script_call( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @staticmethod + def _complex_python_udf(t1, t2): + t3 = torch.nn.functional.linear(t1, t2) + t4 = torch.nn.functional.linear(t2, t3) + t5 = torch.nn.functional.linear(t3, t4) + return torch.linalg.multi_dot([t1, t2, t3, t4, t5]) + + @dist_init + def test_backward_complex_python_udf(self): + # Run the same code locally and with dist autograd and verify gradients + # are 
same. + local_grads = None + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func( + exec_mode, DistAutogradTest._complex_python_udf, t1, t2 + ) + loss = ret.sum() + local_grads = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + + @staticmethod + def _python_udf_with_backward_error(t1, t2): + t3 = t1 + t2 + t4 = SimulateBackwardError.apply(t3) + return torch.linalg.multi_dot([t1, t2, t3, t4]) + + @staticmethod + def _nested_rpc_call_backward_error(t1, t2, dst): + t1 = t1 * t2 + t2 = t1 + t2 + res = rpc.rpc_sync( + worker_name(dst), + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2), + ) + return torch.linalg.multi_dot([t1, t2, res]) + + @dist_init + def test_backward_python_udf_error(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + worker_name(self._next_rank()), + DistAutogradTest._nested_rpc_call_backward_error, + args=(t1, t2, self._next_rank()), + ) + with self.assertRaisesRegex( + RuntimeError, "Simulate error on backward pass" + ): + dist_autograd.backward(context_id, [loss.sum()]) + + _backward_done = False + + @dist_init(clean_shutdown=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_backward_node_failure_python_udf(self): + # Set a short timeout to quickly time out failed RPCs. + rpc._set_rpc_timeout(5) # 5 seconds + initialize_pg(self.file_init_method, self.rank, self.world_size) + + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + + dst = self._next_rank() + res = rpc.rpc_sync( + worker_name(dst), + my_py_nested_call, + args=(t1, t2, dst, self.world_size, 1), + ) + + dist.barrier() + + # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error. + if self.rank == 2: + return + + store = dist.distributed_c10d._get_default_store() + if self.rank == 0: + # Wait for rank 2 to die. + shutdown_error_regex = self.get_shutdown_error_regex() + wait_until_node_failure(2, shutdown_error_regex) + # Shutdown sequence is not very well defined and as a result + # we might see any error given by get_shutdown_error_regex(). + with self.assertRaisesRegex(RuntimeError, shutdown_error_regex): + # Run backwards, and validate we receive an error since rank 2 is dead. + dist_autograd.backward(context_id, [res.sum()]) + + # Mark rank 0 is done in the store, since the RPC framework on + # some nodes might be broken at this point. + store.set('test_backward_node_failure_python_udf_rank0_done', "True") + else: + # Wait for backward to finish on rank 0. + store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10)) + + @staticmethod + def _nested_python_udf(t1, t2, dst): + t3 = t1 * t2 + t4 = t1 + t2 + res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4)) + return t1 * t2 * t3 * t4 * res + + @dist_init + def test_backwards_nested_python_udf(self): + # Run equivalent of _nested_python_udf locally. 
+ self._backwards_nested_python_udf( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + _test_clean_context_backward_context_id = None + + class MyBackwardFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + assert DistAutogradTest._test_clean_context_backward_context_id is not None + + # Release the context to simulate error (use barrier before releasing + # context to ensure all nodes execute the backward function). + dist.barrier() + dist_autograd._release_context( + DistAutogradTest._test_clean_context_backward_context_id + ) + + # Verify all contexts are cleaned up. + assert _all_contexts_cleaned_up() + + return input + + @dist_init + def test_clean_context_during_backward(self): + """ + This test simulates the situation where the 'backward' call might throw + an exception locally which would lead to the autograd context being + cleaned up if we're using the context manager. As a result, the autograd + context might be cleaned up while some threads are still using the + autograd context. + + It is fine for the 'backward' call to throw an exception in this test, + but the process should not crash. + """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + + context = dist_autograd._new_context() + context_id = context._context_id() + DistAutogradTest._test_clean_context_backward_context_id = context_id + + # Send the context id to all nodes. + for i in range(0, self.world_size): + if i != self.rank: + rank_distance = (i - self.rank + self.world_size) % self.world_size + rpc.rpc_sync( + worker_name(i), + _set_rpc_done, + args=(context_id, rank_distance), + ) + + dist.barrier() + + # Verify all context ids have been received. + self.assertEqual(self.world_size - 1, len(known_context_ids)) + + t1 = torch.rand((3, 3), requires_grad=True) + for i in range(0, 100): + dst = self._next_rank() + t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1)) + + # Call MyBackwardFunc as the first op of the backward pass to + # ensure we release the context early in the backward pass. + t1 = DistAutogradTest.MyBackwardFunc.apply(t1) + self.assertEqual(100, len(context._send_functions())) + + context_id = 100 # dummy context_id + with self.assertRaisesRegex( + RuntimeError, + f"Could not find autograd context with id: {context_id}", + ): + dist_autograd.backward(context_id, [t1.sum()]) + + # HACK: Killing workers since otherwise the autograd engine gets stuck on + # other nodes. The proper fix would be addressing: + # https://github.com/pytorch/pytorch/issues/27643, which would inform + # other nodes about the failure. + # The autograd engine gets stuck on other nodes since they're waiting to + # receive gradients from the node that received an error (and as a + # result it didn't execute the rest of the graph). 
+ dist.barrier() + rpc.shutdown(graceful=False) + sys.exit(0) + + @classmethod + def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights): + embedding = embedding_rref.local_value() + return embedding(input, offsets, per_sample_weights) + + @classmethod + def _get_grad(cls, embedding_rref, context_id): + embedding = embedding_rref.local_value() + grad_map = dist_autograd.get_gradients(context_id) + return grad_map[embedding.weight] + + @classmethod + def _mixed_requires_grad_operaton(cls, t1, t2): + if t2.requires_grad: + return t1 - t2 + else: + return t1 * t2 + + @dist_init + def test_mixed_requires_grad(self): + self._mixed_requires_grad( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=False), + False + ) + + class TestDebugInfoFunc(Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + @once_differentiable + def backward(ctx, input): + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + backward_passes = int(debug_info["num_current_backward_passes"]) + + # Hard to validate exact numbers because of the distributed nature. + # We can't use a barrier() here since that would block the single + # CPU thread available for autograd and can cause deadlocks. + assert backward_passes >= 1 and backward_passes <= 4 + return input + + @dist_init + def test_debug_info(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + i = 0 + res = {} + res[i] = t1 + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + # Call the custom function in the middle of the backward pass to ensure + # all nodes are still waiting on a backward(). + res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i]) + i += 1 + + for rank in range(self.world_size): + if rank != self.rank: + res[i + 1] = rpc.rpc_sync( + worker_name(rank), torch.add, args=(res[i], t2) + ) + i += 1 + + dist_autograd.backward(context_id, [res[i].sum()]) + + debug_info = dist_autograd._get_debug_info() + num_autograd_context = int(debug_info["num_autograd_contexts"]) + # Need at least one context and not more than 4. + self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4) + + for rd in range(self.world_size - 1): + rpc.rpc_sync( + worker_name((self.rank + rd + 1) % self.world_size), + _set_rpc_done, + args=(context_id, rd + 1), + ) + + dist.barrier() + + # Validate the debug info. + debug_info = dist_autograd._get_debug_info() + assert debug_info is not None + self.assertEqual(0, int(debug_info["num_current_backward_passes"])) + # Should only contain `num_current_backward_passes` and `num_autograd_contexts`. + self.assertTrue(len(debug_info) == 2) + + self.assertTrue(_all_contexts_cleaned_up()) + + # All contexts should be cleaned up. 
+ debug_info = dist_autograd._get_debug_info() + self.assertEqual(0, int(debug_info["num_autograd_contexts"])) + + @staticmethod + def _workload_thread(): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2)) + t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3)) + t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4)) + t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5)) + + dist_autograd.backward(context_id, [t6.sum()]) + + @dist_init + def test_async_dist_autograd(self): + """ + This test ensures async processing for distributed autograd works + appropriately. This is achieved by spawning multiple threads and + hammering a single node with a lot of backward() calls. + """ + + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + # All other ranks schedule work on rank 0. + threads = [] + for i in range(20): + t = threading.Thread(target=DistAutogradTest._workload_thread) + t.start() + threads.append(t) + + for thread in threads: + thread.join() + + dist.barrier() + + @dist_init + def test_backward_accumulate_grads(self): + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + t3 = torch.matmul(t1, t2) + # Run backward twice. + torch.autograd.backward([t3.sum()], retain_graph=True) + torch.autograd.backward([t3.sum()]) + + t3 = rpc.rpc_sync( + worker_name(self._next_rank()), torch.matmul, args=(t1, t2) + ) + # Run backward twice. + dist_autograd.backward(context_id, [t3.sum()], retain_graph=True) + dist_autograd.backward(context_id, [t3.sum()]) + + # Verify the gradients are same for local and remote execution. + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @staticmethod + def _test_nested_backward_accumulate_grads(t1, t2, dst_rank): + return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2)) + + @dist_init + def test_nested_backward_accumulate_grads(self): + self._nested_backward_accumulate_grads( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init + def test_multiple_backward(self): + self._multiple_backward( + torch.rand(3, 3, requires_grad=True), + torch.rand(3, 3, requires_grad=True), + False + ) + + @dist_init(clean_shutdown=False) + def test_multiple_backward_with_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + with dist_autograd.context() as context_id: + loss = rpc.rpc_sync( + f'worker{self._next_rank()}', + DistAutogradTest._python_udf_with_backward_error, + args=(t1, t2)).sum() + + try: + # Run backward in a loop multiple times. + for i in range(100): + if i < 50: + with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"): + dist_autograd.backward(context_id, [loss], retain_graph=True) + elif i > 50: + # Recovered from error. + dist_autograd.backward(context_id, [loss], retain_graph=True) + else: + dist.barrier() + SimulateBackwardError._simulate_error = False + dist.barrier() + finally: + # Sync before resetting flag. + dist.barrier() + + # Reset the flag. 
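+ # (_simulate_error is a class-level flag shared by every test in this process; + # it was flipped to False at iteration 50 above, so restore it here in the + # finally block to avoid leaking state into later tests.)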
+ SimulateBackwardError._simulate_error = True + + @dist_init + def test_backward_verify_hooks(self): + t1 = torch.ones((3, 3), requires_grad=True) + # Double the gradient. + t1.register_hook(lambda grad: grad * 2) + t2 = torch.ones((3, 3), requires_grad=True) + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]: + with dist_autograd.context() as context_id: + ret = self._exec_func(exec_mode, torch.matmul, t1, t2) + loss = ret.sum() + ret = self._verify_backwards( + exec_mode, [loss], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @dist_init + def test_no_grad_copy(self): + ''' + Similar to test in test_autograd.py. + ''' + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad.data_ptr() + return grad, grad + + class MyFuncSingleGrad(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFuncSingleGrad.static_grad_ptr = grad.data_ptr() + return grad + + class NonContGradFunc(Function): + @staticmethod + def forward(ctx, inp1): + ctx.size = inp1.size() + return torch.tensor([1.]) + + @staticmethod + def backward(ctx, grad): + return torch.ones(1).expand(ctx.size) + + a = torch.randn(5, 6, requires_grad=True) + b = torch.randn(5, 6, requires_grad=True) + # non-contiguous grad should be copied + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))]) + grads = dist_autograd.get_gradients(context_id) + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + # test case that should trigger no copy for a + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFuncSingleGrad.static_grad_ptr + p_a = grads[a].data_ptr() + # Verify there was no clone. + self.assertTrue(p_a == p_g) + + # Test case that should trigger copy for both of a,b. This is + # different in the distributed autograd case since we hold + # a reference to all grads in a vector until all accumulation is done. + with dist_autograd.context() as context_id: + dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]]) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a].data_ptr() + p_b = grads[b].data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # both should be copied. + self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr) + self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr) + + @dist_init + def test_no_grad_copy_sparse(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + return grad + + class NonContGradFunc(Function): + static_grad_ptr = None + + @staticmethod + def forward(ctx, inp1, inp2): + return inp1 + inp2 + + @staticmethod + def backward(ctx, grad): + # Create a sparse tensor with non-contiguous indices and values + # and return as grad. 
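+ # (expand() returns stride-0 views, so nv and ni below are non-contiguous; + # dist autograd therefore has to materialize a copy when accumulating this + # gradient, which is what the data_ptr checks in the test assert.)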
+ v = torch.rand(1, 3) + i = torch.ones(1, 1, dtype=torch.long) + nv = v.expand(8, 3) + ni = i.expand(1, 8) + ngrad = torch.sparse_coo_tensor(ni, nv, (10, 3), dtype=torch.float32) + NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr() + return ngrad, ngrad + + a = torch.randn(10, 3, requires_grad=True) + b = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + # test case that should trigger no copy for a. + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + # check a uses the same buffer + self.assertTrue(p_a == p_g) + + # Run backwards multiple times. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + # non-contiguous indices and value, we should trigger a copy. + with dist_autograd.context() as context_id: + emb_matrix = NonContGradFunc.apply(a, b) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = NonContGradFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + p_b = grads[b]._values().data_ptr() + # check a,b uses different grad buffer + self.assertFalse(p_a == p_b) + # Verify we cloned both grads. + self.assertFalse(p_a == p_g) + self.assertFalse(p_b == p_g) + + # Run backwards multiple times to verify accumulation. + for i in range(10): + dist_autograd.backward(context_id, [loss], retain_graph=True) + + @dist_init + def test_grad_copy_sparse_indices_extra_ref(self): + # create autograd function that saves grad pointer as class static + class MyFunc(Function): + static_grad_ptr = None + static_grad_indices_ref = None + static_grad_values_ref = None + + @staticmethod + def forward(ctx, inp): + return inp + + @staticmethod + def backward(ctx, grad): + MyFunc.static_grad_ptr = grad._values().data_ptr() + # indices() and values() return views, so holding onto + # references of them would not increment refcount of indices + # and values inside the sparse tensor. + MyFunc.static_grad_indices_ref = grad._indices() + MyFunc.static_grad_values_ref = grad._values() + return grad + + a = torch.randn(10, 3, requires_grad=True) + input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + offsets = torch.tensor([0, 4]) + import torch.nn.functional as F + + with dist_autograd.context() as context_id: + emb_matrix = MyFunc.apply(a) + loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() + dist_autograd.backward(context_id, [loss], retain_graph=True) + grads = dist_autograd.get_gradients(context_id) + p_g = MyFunc.static_grad_ptr + p_a = grads[a]._values().data_ptr() + self.assertIsNotNone(MyFunc.static_grad_indices_ref) + self.assertIsNotNone(MyFunc.static_grad_values_ref) + # grad would be stolen, since static_grad_indices_ref and + # static_grad_values_ref are holding onto views and don't bump the + # refcount. 
+ self.assertTrue(p_g == p_a) + + @dist_init + def test_post_hooks(self): + self.hook_called_times = 0 + + def post_hook_add_one(output_grads, input_grads): + self.hook_called_times += 1 + return output_grads + + def post_hook_add_two(output_grads, input_grads): + self.hook_called_times += 2 + return output_grads + + t = torch.rand(10, 10, requires_grad=True) + a = t + t + + # Register post hooks + accumulate_grad_0 = a.grad_fn.next_functions[0][0] + accumulate_grad_0.register_hook(post_hook_add_one) + accumulate_grad_0.register_hook(post_hook_add_two) + + accumulate_grad_1 = a.grad_fn.next_functions[1][0] + accumulate_grad_1.register_hook(post_hook_add_two) + + with dist_autograd.context() as context_id: + loss = a.sum() + dist_autograd.backward(context_id, [loss]) + self.assertEqual(5, self.hook_called_times) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(1, len(grads)) + self.assertTrue(t in grads) + + @staticmethod + def _slow_add(t1, t2): + time.sleep(1) + t3 = t1 + t2 + t3.requires_grad = True + return t3 + + @dist_init + def test_thread_local_context_id(self): + t1 = torch.rand((3, 3)) + t2 = torch.rand((3, 3)) + + t3 = t1 + t2 + t3.requires_grad = True + t3.sum().backward() + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2)) + + with dist_autograd.context() as context_id: + loss = rref.to_here().sum() + # due to slow add, the continuation of this backward pass will be + # invoked by the previous rpc.remote thread which does not have a + # valid context_id. So, this can test whether we propagate + # thread_local states properly when jumping across threads on the + # server side. + dist_autograd.backward(context_id, [loss]) + self.assertTrue( + rpc.rpc_sync( + dst, + _compare_owner_value, + args=(context_id, rref, t3.grad) + ) + ) + + +class CudaDistAutogradTest(CommonDistAutogradTest): + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_simple(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + (t1 + t2).sum().backward() + with dist_autograd.context() as context_id: + t3 = t1 + t2 + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertEqual(t1.grad, grads[t1]) + self.assertEqual(t2.grad, grads[t2]) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. + for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5) + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t7.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + @skip_if_lt_x_gpu(1) + @dist_init + def test_gpu_to_cpu_continuation_gpu_root(self): + t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0") + t2 = torch.rand(3, 3, requires_grad=True) + # Run a few iterations. 
+ for i in range(3): + t1.grad = None + t2.grad = None + # Root is CPU + local_grads = None + for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]: + with dist_autograd.context() as context_id: + t3 = self._exec_func(exec_mode, torch.add, t2, t2) + t4 = t3.cuda(0) + t1 + t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2) + t6 = t5.cuda(0) + t4 + # Autograd graph consists of CPU -> GPU -> CPU execution. + ret = self._verify_backwards( + exec_mode, [t6.sum()], context_id, local_grads, t1, t2 + ) + local_grads = ret if ret else local_grads + + +class FaultyAgentDistAutogradTest(RpcAgentTestFixture): + # Reusing a simplified helper function from DistAutogradTest to ensure + # autograd context is successfully cleaned up even when RPCs are failing. + def context_cleanup_test_helper(self, rpc_args, func): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # test that in dist autograd, in the case that tensors communicated over RPC do + # NOT require grad, we still cleanup the dist autograd contexts created + # on other nodes. This is because the autograd context is still + # communicated over RPC even if tensor arguments do not require grad, as + # it is possible that the response could. + dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank} + + with dist_autograd.context() as context_id: + for dst_rank in dst_ranks: + rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args) + rpc.rpc_sync( + worker_name(dst_rank), _set_rpc_done, args=(context_id, 1) + ) + # the thread's context id should be cleaned up + with self.assertRaises(RuntimeError): + dist_autograd._retrieve_context(context_id) + # Ensure all peers have finished mutating the + # `known_context_ids` set. + dist.barrier() + # check that all contexts have been cleaned up. + success = _all_contexts_cleaned_up() + self.assertTrue(success) + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init + def test_context_cleanup_tensor_with_grad(self): + t1 = torch.ones(3, 3, requires_grad=True) + t2 = torch.zeros(3, 3, requires_grad=True) + self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add) + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + + +class WrapperModule(nn.Module): + def __init__(self, model, device): + super().__init__() + self.model = model.to(device) + + def forward(self, *args): + return self.model(*args) + + def gradients(self, ctx_id): + grads = dist_autograd.get_gradients(ctx_id) + return [grads[p] for p in self.model.parameters()] + + +class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(4) + def test_device_maps_backward_pass(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. 
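+ # For example, rank 0 maps its cuda:0 to the peer's cuda:1 for the forward + # send below, and the gradient produced on cuda:1 is mapped back onto cuda:0 + # during the backward pass, so grads[t1] lands on t1's original device.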
+ options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t1 = torch.rand(10, device=self.rank, requires_grad=True) + t2 = torch.rand(10, device=self.rank, requires_grad=True) + with dist_autograd.context() as context_id: + res = rpc.rpc_sync(dst, torch.add, args=(t1, t2)) + dist_autograd.backward(context_id, [res.sum()]) + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(torch.ones(10), grads[t1]) + self.assertEqual(torch.ones(10), grads[t2]) + self.assertEqual(t1.device, grads[t1].device) + self.assertEqual(t2.device, grads[t2].device) + + rpc.shutdown() + + class MyRemoteCompute(torch.nn.Module): + def forward(self, input): + input = input * 2.0 + return input + + class MyLocalCompute(torch.nn.Module): + def __init__(self, next_stage): + super().__init__() + self.next_stage = next_stage + + def forward(self, input): + return self.next_stage.rpc_sync().forward(input) + + @skip_if_lt_x_gpu(4) + def test_dist_autograd_sync_streams(self): + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + + # The reverse of this device mapping should be used for the backward pass. + options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute) + local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute) + for _ in range(10): + input = torch.rand([1000, 10000], device=self.rank, requires_grad=True) + # Run local autograd + result = input * 2.0 + r = random.random() + loss = result.sum() * r + loss.backward() + + # Run distributed autograd + with dist_autograd.context() as context_id: + result = local_compute(input) + loss = result.sum() * r + dist_autograd.backward(context_id, [loss]) + + # Compare grads. 
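+ # The gradient recorded for input in the dist autograd context should match + # the locally accumulated input.grad, since both passes scale the loss by the + # same random factor r.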
+ grads = dist_autograd.get_gradients(context_id) + self.assertEqual(input.grad, grads[input]) + + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_gradients_synchronizations(self): + options = self.rpc_backend_options + for peer_rank in range(self.world_size): + options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # this is master + layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)] + local_layers = [l.to(0) for l in layers] + remote_layers = [] + for rank in range(1, self.world_size): + remote_layers.append(rpc.remote( + worker_name(rank), + WrapperModule, + args=(layers[rank - 1], rank) + )) + + x = torch.randn(5000, 2000).to(0) + # local iteration + local_model = nn.Sequential(*local_layers) + local_model(x).sum().backward() + + # remote iteration + with dist_autograd.context() as context_id: + for remote_layer in remote_layers: + x = remote_layer.rpc_sync().forward(x) + + dist_autograd.backward(context_id, [x.sum()]) + + futs = [] + for remote_layer in remote_layers: + futs.append(remote_layer.rpc_async().gradients(context_id)) + + for i in range(len(futs)): + local_gradients = [p.grad for p in local_layers[i].parameters()] + for g1, g2 in zip(futs[i].wait(), local_gradients): + self.assertEqual(g1, g2) + + rpc.shutdown() diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py new file mode 100644 index 0000000000000000000000000000000000000000..310dc740db6802dff140499aa68aa9dd18978891 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py @@ -0,0 +1,281 @@ +# mypy: allow-untyped-defs + + +import threading + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +from torch import optim +from torch.distributed.optim import DistributedOptimizer +from torch.testing._internal.dist_utils import dist_init +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +class MyModule: + lock = threading.Lock() + + def __init__(self, requires_grad=True): + # cannot directly use torch.manual_seed(0) as all threads share the same + # default generator. The race from multiple RPC threads could mess up + # the draw order from the default RNG instance, leading to + # non-deterministic behavior. Hence, create a dedicated RNG here. 
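+ # (Passing generator=g_cpu to torch.rand draws from this private generator + # instead of the global one, so concurrent RPC threads cannot perturb the + # sequence and every MyModule instance starts from identical weights.)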
+ g_cpu = torch.Generator() + g_cpu.manual_seed(0) + self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=g_cpu) + + def forward(self, t1): + return torch.mm(self.w, t1) + + def get_w(self): + return self.w + + +class FailingOptimizer(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + + def step(self, closure=None): + raise ValueError("Error running optimizer.") + + +class OptimizerFailingOnConstructor(optim.Optimizer): + def __init__(self, params): + super().__init__(params, {}) + raise ValueError("Error creating optimizer.") + + def step(self, closure=None): + raise NotImplementedError + + +def _call_method(method, obj_rref, *args, **kwargs): + return method(obj_rref.local_value(), *args, **kwargs) + + +def remote_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.remote on a method in a remote object. + + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a RRef to the remote method call result. + """ + return rpc.remote( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +def rpc_async_method(method, obj_rref, *args, **kwargs): + """ + Call rpc.rpc_async on a method in a remote object. + + Args: + method: the method (for example, Class.method) + obj_rref (RRef): remote reference to the object + args: positional arguments to pass to the method + kwargs: keyword arguments to pass to the method + + Returns a Future to the method call result. + """ + return rpc.rpc_async( + obj_rref.owner(), + _call_method, + args=[method, obj_rref] + list(args), + kwargs=kwargs, + ) + + +class DistOptimizerTest(RpcAgentTestFixture): + @dist_init() + def test_dist_optim_exception(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + dist_optim = DistributedOptimizer( + FailingOptimizer, [remote_param1, remote_param2] + ) + + with dist_autograd.context() as context_id: + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1).sum() + + dist_autograd.backward(context_id, [loss]) + with self.assertRaisesRegex(Exception, "Error running optimizer"): + dist_optim.step(context_id) + + @dist_init() + def test_dist_optim_exception_on_constructor(self): + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + with self.assertRaisesRegex(Exception, "Error creating optimizer."): + dist_optim = DistributedOptimizer( + OptimizerFailingOnConstructor, [remote_param1, remote_param2] + ) + + def _test_dist_optim_base(self, 
optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule() + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + remote_module2 = rpc.remote(owner2, MyModule) + remote_param1 = remote_method(MyModule.get_w, remote_module1) + remote_param2 = remote_method(MyModule.get_w, remote_module2) + + old_w1_remote = remote_param1.to_here() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = rpc_async_method(MyModule.forward, remote_module1, t2) + output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait() + new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait() + + # ensure optimizer changed weights + self.assertNotEqual(old_w1, new_w1) + self.assertNotEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim(self): + self._test_dist_optim_base(optim.Adagrad, lr=0.05) + self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True) + self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True) + self._test_dist_optim_base(optim.SGD, lr=0.05) + self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True) + self._test_dist_optim_base(optim.Adadelta, rho=0.95) + self._test_dist_optim_base(optim.RMSprop, lr=0.05) + self._test_dist_optim_base(optim.Adamax, lr=0.05) + self._test_dist_optim_base(optim.Rprop, lr=0.05) + + def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs): + # local version + module1 = MyModule() + module2 = MyModule(requires_grad=False) + params = [module1.get_w(), module2.get_w()] + local_optim = optim_cls(params, *args, **kwargs) + + old_w1 = module1.w.clone().detach() + old_w2 = module2.w.clone().detach() + + g_cpu = torch.Generator() + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = module1.forward(t2) + output2 = module2.forward(output1) + loss = torch.add(output2, t1).sum() + + loss.backward() + local_optim.step() + + # distributed version + owner1 = "worker%d" % ((self.rank + 1) % self.world_size) + owner2 = "worker%d" % ((self.rank + 2) % self.world_size) + + remote_module1 = rpc.remote(owner1, MyModule) + 
remote_module2 = rpc.remote(owner2, MyModule, args=(False,)) + remote_param1 = remote_module1.remote().get_w() + remote_param2 = remote_module2.remote().get_w() + + # sanity check: local and remote initial weights should match + self.assertEqual(old_w1, remote_param1.to_here()) + self.assertEqual(old_w2, remote_param2.to_here()) + + dist_optim = DistributedOptimizer( + optim_cls, [remote_param1, remote_param2], *args, **kwargs + ) + + with dist_autograd.context() as context_id: + g_cpu.manual_seed(0) + t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu) + output1 = remote_module1.rpc_async().forward(t2) + output2 = remote_module2.rpc_async().forward(output1.wait()) + loss = torch.add(output2.wait(), t1) + + dist_autograd.backward(context_id, [loss.sum()]) + dist_optim.step(context_id) + + new_w1 = remote_module1.rpc_async().get_w().wait() + new_w2 = remote_module2.rpc_async().get_w().wait() + + # ensure optimizer changed weights for w1 + self.assertNotEqual(old_w1, new_w1) + + # ensure optimizer not changed weights for w2 + self.assertEqual(old_w2, new_w2) + # ensure local equals remote + self.assertEqual(new_w1, module1.get_w()) + self.assertEqual(new_w2, module2.get_w()) + + @dist_init() + def test_dist_optim_none_grads(self): + self._test_dist_optim_none_grads(optim.SGD, lr=0.05) + self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Rprop, lr=0.05) + self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec272060d7c2a8687697bab76c60f1f3eb5a0f9d Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f889481781045ad688ac0d2859e45a2feb0f584 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ccf14a0d79d143338e5c250effd0565166d12d2 Binary files /dev/null and 
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py new file mode 100644 index 0000000000000000000000000000000000000000..98b41920d2a7de6587866307ac4d5c6b6e12c525 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py @@ -0,0 +1,144 @@ +# mypy: allow-untyped-defs + +# If you need to modify this file to make this test pass, please also apply same edits accordingly to +# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py +# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server + +import threading +from datetime import datetime +from time import perf_counter + +import torch +import torch.distributed.rpc as rpc +import torch.nn as nn +from torch import optim + +from torch.testing._internal.dist_utils import ( + dist_init, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture + +batch_size = 20 +in_features = 100 +out_features = 30 +num_batches = 4 + + +def timed_log(text): + print(f"{datetime.now().strftime('%H:%M:%S')} {text}") + + +class BatchUpdateParameterServer: + + def __init__(self, batch_update_size): + self.model = nn.Linear(in_features, out_features) + self.lock = threading.Lock() + self.future_model = torch.futures.Future() + self.batch_update_size = batch_update_size + self.curr_update_size = 0 + self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9) + for p in self.model.parameters(): + p.grad = torch.zeros_like(p) + + def get_model(self): + return self.model + + @staticmethod + @rpc.functions.async_execution + def update_and_fetch_model(ps_rref, grads): + self = ps_rref.local_value() + for p, g in zip(self.model.parameters(), grads): + if p.grad is None: + p.grad = g + else: + p.grad += g + with self.lock: + timed_log(f"PS got {self.curr_update_size}/{self.batch_update_size} updates") + self.curr_update_size += 1 + fut = self.future_model + + if self.curr_update_size >= self.batch_update_size: + for p in self.model.parameters(): + p.grad /= self.batch_update_size + self.curr_update_size = 0 + self.optimizer.step() + self.optimizer.zero_grad() + fut.set_result(self.model) + timed_log("PS updated model") + self.future_model = torch.futures.Future() + + return fut + + +class Trainer: + + def __init__(self, ps_rref): + self.ps_rref = ps_rref + self.loss_fn = nn.L1Loss() + + def get_next_batch(self): + for _ in range(num_batches): + inputs = torch.randn(batch_size, in_features) + labels = torch.zeros(batch_size, out_features) + yield inputs, labels + + def train(self): + name = rpc.get_worker_info().name + m = self.ps_rref.rpc_sync().get_model() + for inputs, labels in self.get_next_batch(): + timed_log(f"{name} processing one batch") + self.loss_fn(m(inputs), labels).backward() + timed_log(f"{name} reporting grads") + m = rpc.rpc_sync( + self.ps_rref.owner(), + BatchUpdateParameterServer.update_and_fetch_model, + args=(self.ps_rref, [p.grad for p in m.cpu().parameters()]), + ) + timed_log(f"{name} got updated model") + + +def run_trainer(ps_rref): + trainer = Trainer(ps_rref) + 
trainer.train() + + +def run_ps(trainers): + timed_log("Start training") + start = perf_counter() + ps_rref = rpc.RRef(BatchUpdateParameterServer(len(trainers))) + futs = [] + for trainer in trainers: + futs.append( + rpc.rpc_async(trainer, run_trainer, args=(ps_rref,)) + ) + + torch.futures.wait_all(futs) + stop = perf_counter() + timed_log("Finish training") + timed_log(f"Time spent training: {stop-start}s") + +class ParameterServerTest(RpcAgentTestFixture): + + @dist_init(setup_rpc=False) + def test_batch_updating_parameter_server(self): + + if self.rank != 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + else: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)]) + + rpc.shutdown() diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5d7e7b1244bcea221056a47186c8d4708a111cb2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py @@ -0,0 +1,261 @@ +# mypy: allow-untyped-defs + +# If you need to modify this file to make this test pass, please also apply same edits accordingly to +# https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py +# and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html + +import numpy as np +from itertools import count + +import torch +import torch.distributed.rpc as rpc +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote +from torch.distributions import Categorical + +from torch.testing._internal.dist_utils import dist_init, worker_name +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture + +TOTAL_EPISODE_STEP = 5000 +GAMMA = 0.1 +SEED = 543 + +def _call_method(method, rref, *args, **kwargs): + r""" + a helper function to call a method on the given RRef + """ + return method(rref.local_value(), *args, **kwargs) + + +def _remote_method(method, rref, *args, **kwargs): + r""" + a helper function to run method on the owner of rref and fetch back the + result using RPC + """ + args = [method, rref] + list(args) + return rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs) + + +class Policy(nn.Module): + r""" + Borrowing the ``Policy`` class from the Reinforcement Learning example. + Copying the code to make these two examples independent. + See https://github.com/pytorch/examples/tree/master/reinforcement_learning + """ + def __init__(self) -> None: + super().__init__() + self.affine1 = nn.Linear(4, 128) + self.dropout = nn.Dropout(p=0.6) + self.affine2 = nn.Linear(128, 2) + + self.saved_log_probs = [] + self.rewards = [] + + def forward(self, x): + x = self.affine1(x) + x = self.dropout(x) + x = F.relu(x) + action_scores = self.affine2(x) + return F.softmax(action_scores, dim=1) + + +class DummyEnv: + r""" + A dummy environment that implements the required subset of the OpenAI gym + interface. 
It exists only to avoid a dependency on gym for running the + tests in this file. It is designed to run for a set max number of iterations, + returning random states and rewards at each step. + """ + def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0): + self.state_dim = state_dim + self.num_iters = num_iters + self.iter = 0 + self.reward_threshold = reward_threshold + + def seed(self, manual_seed): + torch.manual_seed(manual_seed) + + def reset(self): + self.iter = 0 + return torch.randn(self.state_dim) + + def step(self, action): + self.iter += 1 + state = torch.randn(self.state_dim) + reward = torch.rand(1).item() * self.reward_threshold + done = self.iter >= self.num_iters + info = {} + return state, reward, done, info + + +class Observer: + r""" + An observer has exclusive access to its own environment. Each observer + captures the state from its environment, and send the state to the agent to + select an action. Then, the observer applies the action to its environment + and reports the reward to the agent. + """ + def __init__(self) -> None: + self.id = rpc.get_worker_info().id + self.env = DummyEnv() + self.env.seed(SEED) + + def run_episode(self, agent_rref, n_steps): + r""" + Run one episode of n_steps. + Arguments: + agent_rref (RRef): an RRef referencing the agent object. + n_steps (int): number of steps in this episode + """ + state, ep_reward = self.env.reset(), 0 + for step in range(n_steps): + # send the state to the agent to get an action + action = _remote_method(Agent.select_action, agent_rref, self.id, state) + + # apply the action to the environment, and get the reward + state, reward, done, _ = self.env.step(action) + + # report the reward to the agent for training purpose + _remote_method(Agent.report_reward, agent_rref, self.id, reward) + + if done: + break + + +class Agent: + def __init__(self, world_size): + self.ob_rrefs = [] + self.agent_rref = RRef(self) + self.rewards = {} + self.saved_log_probs = {} + self.policy = Policy() + self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2) + self.eps = np.finfo(np.float32).eps.item() + self.running_reward = 0 + self.reward_threshold = DummyEnv().reward_threshold + for ob_rank in range(1, world_size): + ob_info = rpc.get_worker_info(worker_name(ob_rank)) + self.ob_rrefs.append(remote(ob_info, Observer)) + self.rewards[ob_info.id] = [] + self.saved_log_probs[ob_info.id] = [] + + def select_action(self, ob_id, state): + r""" + This function is mostly borrowed from the Reinforcement Learning example. + See https://github.com/pytorch/examples/tree/master/reinforcement_learning + The main difference is that instead of keeping all probs in one list, + the agent keeps probs in a dictionary, one key per observer. + + NB: no need to enforce thread-safety here as GIL will serialize + executions. + """ + probs = self.policy(state.unsqueeze(0)) + m = Categorical(probs) + action = m.sample() + self.saved_log_probs[ob_id].append(m.log_prob(action)) + return action.item() + + def report_reward(self, ob_id, reward): + r""" + Observers call this function to report rewards. + """ + self.rewards[ob_id].append(reward) + + def run_episode(self, n_steps=0): + r""" + Run one episode. The agent will tell each observer to run n_steps. 
+ """ + futs = [] + for ob_rref in self.ob_rrefs: + # make async RPC to kick off an episode on all observers + futs.append( + rpc_async( + ob_rref.owner(), + _call_method, + args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps) + ) + ) + + # wait until all observers have finished this episode + for fut in futs: + fut.wait() + + def finish_episode(self): + r""" + This function is mostly borrowed from the Reinforcement Learning example. + See https://github.com/pytorch/examples/tree/master/reinforcement_learning + The main difference is that it joins all probs and rewards from + different observers into one list, and uses the minimum observer rewards + as the reward of the current episode. + """ + + # joins probs and rewards from different observers into lists + R, probs, rewards = 0, [], [] + for ob_id in self.rewards: + probs.extend(self.saved_log_probs[ob_id]) + rewards.extend(self.rewards[ob_id]) + + # use the minimum observer reward to calculate the running reward + min_reward = min(sum(self.rewards[ob_id]) for ob_id in self.rewards) + self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward + + # clear saved probs and rewards + for ob_id in self.rewards: + self.rewards[ob_id] = [] + self.saved_log_probs[ob_id] = [] + + policy_loss, returns = [], [] + for r in rewards[::-1]: + R = r + GAMMA * R + returns.insert(0, R) + returns = torch.tensor(returns) + returns = (returns - returns.mean()) / (returns.std() + self.eps) + for log_prob, R in zip(probs, returns): + policy_loss.append(-log_prob * R) + self.optimizer.zero_grad() + policy_loss = torch.cat(policy_loss).sum() + policy_loss.backward() + self.optimizer.step() + return min_reward + + +def run_agent(agent, n_steps): + for i_episode in count(1): + agent.run_episode(n_steps=n_steps) + last_reward = agent.finish_episode() + + if agent.running_reward > agent.reward_threshold: + print(f"Solved! Running reward is now {agent.running_reward}!") + break + + +class ReinforcementLearningRpcTest(RpcAgentTestFixture): + @dist_init(setup_rpc=False) + def test_rl_rpc(self): + if self.rank == 0: + # Rank 0 is the agent. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + agent = Agent(self.world_size) + run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1))) + + # Ensure training was run. We don't really care about whether the task was learned, + # since the purpose of the test is to check the API calls. + self.assertGreater(agent.running_reward, 0.0) + else: + # Other ranks are observers that passively wait for instructions from the agent. 
+ rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..132e30e5b5cfb41ab9c0a2a0f0f6f960d648c0f9 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py @@ -0,0 +1,326 @@ +# mypy: allow-untyped-defs + +import torch +import time +import torch.distributed.rpc as rpc +from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs +from torch.testing._internal.dist_utils import ( + dist_init, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + +def add_rref_to_value(rref, value): + return rref.to_here() + value + +class FaultyAgentRpcTest(RpcAgentTestFixture): + + # no faulty_messages defined so this fails all retryable messages - see + # faulty_rpc_agent_test_fixture.py for the list of retryable messages. + @dist_init(messages_to_delay={}) + def test_check_failed_messages(self): + if self.rank == 0: + dst_worker_b = worker_name((self.rank + 1) % self.world_size) + dst_worker_c = worker_name((self.rank + 2) % self.world_size) + + # Worker0 sends RPC to Worker1 and creates an RRef there + rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2))) + # Worker0 sends an RPC to Worker2 with the RRef as an arg + rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2))) + # check if the output is as expected + self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2))) + # explicitly delete all User RRefs + _delete_all_user_and_unforked_owner_rrefs() + + @dist_init + def test_verify_backend_options(self): + self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE) + self.assertEqual(self.rpc_backend_options.num_worker_threads, 8) + self.assertEqual(self.rpc_backend_options.num_fail_sends, 3) + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4) + self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2) + self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]) + def test_custom_faulty_messages(self): + self.assertEqual( + {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"}, + set(self.rpc_backend_options.messages_to_fail), + ) + + @dist_init(faulty_messages=[]) + def test_no_faulty_messages(self): + self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0) + + @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_custom_messages_to_delay(self): + self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5}) + + def _test_remote_message_dropped_pickle(self, dst=None): + if self.rank != 0: + return + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = 
f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, my_sleep_func, args=(1,)) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref._serialize() + # Test that using RRef as arg over RPC (which forks) results in the same + # error + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1)) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle(self): + self._test_remote_message_dropped_pickle() + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_remote_message_dropped_pickle_to_self(self): + self._test_remote_message_dropped_pickle(self.rank) + + + def _test_remote_message_dropped_timeout(self, func, args, dst=None): + if self.rank != 0: + return + + # test the case where rpc.remote() message creation is completely dropped. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # Since we fail python_remote_call messages synchronously, the future + # corresponding to this remote call will be marked with an error when + # this function returns. + rref = rpc.remote(dst_worker, func, args=args) + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + # Note: during shutdown, logs will indicate "Could not find OwnerRRef..." + # on the owning nodes, this is expected because the OwnerRRef was never + # successfully created. Therefore, delAllUsers will work as expected. + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_builtin_remote_message_dropped_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args) + + @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"]) + def test_udf_remote_message_dropped_timeout_to_self(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_dropped_timeout(func, args, dst=0) + + def _test_remote_message_delay_timeout(self, func, args, dst=None): + if self.rank != 0: + return + # Test the case where remote message is eventually processed on the owner, + # but the future on the creator times out before the response comes back. + dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, func, args=args, timeout=0.001) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + + # Call to ensure pending callbacks are run. 
+ wait_until_pending_futures_and_users_flushed() + # to_here() should now pick up that rpc.remote() creation has failed. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + # Test the case where rpc.remote() times out, but to_here() has already + # started blocking before. + # NOTE: we only test this when not sending to self, as to_here() calls + # calls localValue(), which does not send an RPC and thus does not have + # a timeout. This can be supported by allowing future.wait() to + # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280) + if dst_rank != self.rank: + slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2) + + with self.assertRaisesRegex(RuntimeError, expected_error): + # to_here() should raise timeout error, since it does not know about the + # status of rpc.remote(). + slow_rref.to_here(0.001) + # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete + # but this can be a noop since it may not exist on the owner yet. Later, + # the owner can process the RRef creation and wait for the delete message, + # thus leading to a timeout. + # Therefore, we wait until we get notification that pending owners have + # been confirmed before sending out RRefUserDeletes. + if dst_rank != self.rank: + wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout(self): + func = my_sleep_func + args = (2,) + self._test_remote_message_delay_timeout(func, args) + + @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2}) + def test_udf_remote_message_delay_timeout_to_self(self): + func = my_sleep_func + args = (1,) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_builtin_delay_timeout_to_self(self): + func = torch.add + args = (torch.tensor(1), torch.tensor(1)) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args) + + @dist_init( + faulty_messages=[], + messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1}, + ) + def test_remote_message_script_delay_timeout_to_self(self): + func = my_script_func + args = (torch.tensor(1),) + self._test_remote_message_delay_timeout(func, args, dst=0) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref.to_here(0.01) + + rref.to_here() + + @dist_init(faulty_messages=[]) + def test_rpc_builtin_timeout(self): + next_rank = 
(self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + # PYTHON_CALL message types which correspond to Python UDF over RPC + # by default get a delay (see faulty_rpc_agent_test_fixture) + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync( + dst_worker, + torch.add, + args=(torch.tensor(1), torch.tensor(1)), + timeout=1, + ) + + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1 + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + fut = rpc.rpc_async( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_rpc_script_timeout(self): + next_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(next_rank) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + + fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure that the currently set default timeout is large enough such + # that RPCs with delays still complete. + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + fut.wait() + + # Ensure timeout if we set a new default and don't override + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),) + ) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if we specify timeout of 0 + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async( + dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0 + ) + fut.wait() + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..ca584c1dc95aa0cd8f4d21ac8f0a31d214974106 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py @@ -0,0 +1,62 @@ +# mypy: allow-untyped-defs + +import torch.distributed.rpc as rpc +import torch.distributed.rpc._testing # noqa: F401 +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +# The following message types are currently retried in the RREF protocol and +# distributed autograd. Thus only these messages should be tested with the +# Faulty RPC Agent. 
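+# Individual tests opt into fault injection through the @dist_init decorator, e.g.
+# @dist_init(faulty_messages=["RREF_FORK_REQUEST"], messages_to_delay={"SCRIPT_CALL": 1.5});
+# when an argument is omitted, setup_fault_injection() keeps the defaults defined below.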
+retryable_message_types = ["RREF_FORK_REQUEST", + "RREF_CHILD_ACCEPT", + "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ"] + +# The following messages incur the corresponding delay in seconds while being +# processed in FaultyTensorPipeAgent's enqueueSend() function. +default_messages_to_delay = { + "PYTHON_CALL": 1.5, # Python UDF + "SCRIPT_CALL": 1.5, # Script/Builtin +} + +class FaultyRpcAgentTestFixture(RpcAgentTestFixture): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.messages_to_fail = retryable_message_types + self.messages_to_delay = default_messages_to_delay + + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "FAULTY_TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + num_worker_threads=8, + num_fail_sends=3, + messages_to_fail=self.messages_to_fail, + messages_to_delay=self.messages_to_delay, + ) + + def setup_fault_injection(self, faulty_messages, messages_to_delay): + if faulty_messages is not None: + self.messages_to_fail = faulty_messages + if messages_to_delay is not None: + self.messages_to_delay = messages_to_delay + + def get_shutdown_error_regex(self): + error_regexes = [ + "Exception in thread pool task", + "Connection reset by peer", + "Connection closed by peer" + ] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea7a85204fd8f9f73df7f13236e6c690c751b7a Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b60b85cbf29c4e6323a9b723443da9752d1a0a4 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py new file mode 100644 index 0000000000000000000000000000000000000000..a1163adb97cc88db8ad779d15f0ad8dbdeb4e840 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py @@ -0,0 +1,116 @@ +# mypy: allow-untyped-defs + +from typing import Dict, Tuple + +import torch +import torch.distributed.autograd as 
dist_autograd +import torch.distributed.rpc as rpc +from torch import Tensor +from torch.distributed.rpc import rpc_async +from torch.testing import FileCheck +from torch.testing._internal.dist_utils import dist_init, worker_name +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +@torch.jit.script +def local_add(t1, t2): + return torch.add(t1, t2) + + +@torch.jit.script +def remote_add(t1, t2, dst: str): # noqa: E999 + return rpc_async(dst, local_add, (t1, t2)).wait() + + +@torch.jit.script +def fork_add(t1, t2, dst: str): + fut = torch.jit._fork(remote_add, t1, t2, dst) + return torch.jit._wait(fut) + + +class JitDistAutogradTest(RpcAgentTestFixture): + @dist_init + def test_get_gradients(self): + dst_rank = self.rank + + @torch.jit.script + def dist_get_gradients(context_id: int) -> (Dict[Tensor, Tensor]): + return dist_autograd.get_gradients(context_id) + + FileCheck().check("get_gradients").run(str(dist_get_gradients.graph)) + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + t3 = torch.add(t1, t2) + + dist_autograd.backward(context_id, [t3.sum()]) + grads = dist_get_gradients(context_id) + + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + self.assertEqual(torch.ones(3, 3), grads[t1]) + self.assertEqual(torch.ones(3, 3), grads[t2]) + + @dist_init + def test_dist_backward(self): + if self.rank != 0: + return + + @torch.jit.script + def dist_backward_script(context_id: int, loss: torch.Tensor): + dist_autograd.backward(context_id, [loss]) + + FileCheck().check("dist_backward").run(str(dist_backward_script.graph)) + with dist_autograd.context() as context_id: + t1 = torch.rand(3, 3, requires_grad=True) + t2 = torch.rand(3, 3, requires_grad=True) + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum() + dist_backward_script(context_id, loss) + + @dist_init + def test_jit_fork_within_context(self): + with dist_autograd.context() as context_id: + t1 = torch.rand((3, 3), requires_grad=True) + t2 = torch.rand((3, 3), requires_grad=True) + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + res = fork_add(t1, t2, dst_worker_name) + loss = res.sum() + dist_autograd.backward(context_id, [loss]) + + grads = dist_autograd.get_gradients(context_id) + self.assertEqual(2, len(grads)) + self.assertIn(t1, grads) + self.assertIn(t2, grads) + + @dist_init + def test_restore_context_after_swtich_to_jit_thread(self): + if self.rank != 0: + return + + @torch.jit.script + def forward_script( + context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor + ) -> Tuple[Tensor, Tensor]: + res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1)) + res1 = res1_fut.wait() # After this, the script runs in a new JIT thread. + loss1 = res1.sum() + + # SendRpcBackward is not attached, since DistAutogradContext is lost here. 
+ res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2)) + res2 = res2_fut.wait() + loss2 = res2.sum() + + return loss1, loss2 + + with dist_autograd.context() as context_id: + t1 = torch.ones((2, 3), requires_grad=True) + t2 = torch.ones((2, 3), requires_grad=True) + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2) + dist_autograd.backward(context_id, [loss0, loss1]) + grad0, grad1 = dist_autograd.get_gradients(context_id) + self.assertEqual(grad0, grad1) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2f83eb3311c650323083441b63fc7ec753a102fb --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py @@ -0,0 +1,1385 @@ +# mypy: allow-untyped-defs + +import time +import io +from typing import Dict, List, Tuple, Any + +import torch +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch import Tensor +from torch.autograd.profiler import record_function +from torch.distributed.rpc import RRef +from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key +from torch.futures import Future +from torch.testing._internal.common_utils import TemporaryFileName +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + +from torch.autograd.profiler_legacy import profile as _profile + +def rref_isinstance(rref, cls_to_check): + return isinstance(rref.local_value(), cls_to_check) + +def sleep(t): + time.sleep(t) + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +@torch.jit.script +def rref_local_value(rref: RRef[Tensor]) -> Tensor: + return rref.local_value() + + +@torch.jit.script +def list_create() -> List[int]: + global_list = [1, 2, 3] + return global_list + + +@torch.jit.script +def rref_list_mutate(rref: RRef[List[int]]) -> None: + rref.local_value().append(4) + rref.to_here().append(5) + rref.to_here(5.0).append(6) + + +def return_value(value: int) -> int: + return value + + +class RRefAPITest: + @dist_init + def test_rref_is_owner(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + rref_var = rpc_return_rref(dst_worker_name) + + @torch.jit.script + def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool: + return rref_var.is_owner() + + res = rref_tensor_is_owner(rref_var) + self.assertEqual(res, False) + + @dist_init + def test_rref_local_value(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + rref = rpc_return_rref(dst_worker_name) + + with self.assertRaisesRegex( + RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef" + ): + rref_local_value(rref) + + ret = ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,)) + self.assertEqual(ret, torch.add(torch.ones(2, 2), 1)) + + @dist_init + def test_local_rref_local_value(self): + if self.rank != 0: + return + + dst_worker_name = worker_name(self.rank) + rref = rpc.remote(dst_worker_name, return_value, (5,), {}) + + ret = rref_local_value(rref) + self.assertEqual(ret, 5) + + def _create_rref(self): + 
owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), script_check_rref_confirmed, args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), script_check_rref_confirmed, args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_list_mutate(self): + dst = worker_name((self.rank + 1) % self.world_size) + list_rref = rpc.remote(dst, list_create) + + rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,)) + self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6]) + + +@torch.jit.script +def no_arg(): + return 0 + + +@torch.jit.script +def one_arg(value): + return value + 1 + +@torch.jit.script +def script_add_ones(x): + return torch.add(x, torch.ones(1)) + +@torch.jit.script +def script_add_ones_with_record_function(x, block: str): + with record_function(block): + return torch.add(x, torch.ones(1)) + + +@torch.jit.script +def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor: + t: Tensor = torch.ones(1) + with record_function(block) as rf: + fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, )) + # Extra operator call to avoid de-duplication of the next async call + # see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279 + zero = torch.zeros_like(t) + fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, )) + res = fut1.wait() + fut2.wait() + zero + return res + + + +@torch.jit.script +def script_fork_wait_udf(tensor): + fut = torch.jit._fork(script_add_ones, tensor) + x = torch.jit._wait(fut) + return x + + +@torch.jit.script +def rref_to_here(rref_var: RRef[Tensor]) -> Tensor: + return rref_var.to_here() + + +@torch.jit.script +def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]: + return rref_var + + +@torch.jit.script +def script_raise_func(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + + +@torch.jit.script +def script_fork_wait_throw(invalue): + fut = torch.jit._fork(script_raise_func, invalue) + value = torch.jit._wait(fut) + return value + + +@torch.jit.script +def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor: + # Call rpc_async from within ScriptFunction and ensure that we can attach + # profiling callbacks. Note that handle here is a Tensor representation of + # RecordFunction. + fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),)) + torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut) + ret = fut.wait() + return ret + +@torch.jit.script +def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor: + fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block)) + return fut.wait() + + +@torch.jit.script +def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor: + # Call fork from within ScriptFunction and ensure that we can attach profiling + # callbacks to the resulting future. Note that handle here is a Tensor + # representation of RecordFunction. 
+ fut = torch.jit._fork(one_arg, torch.tensor(1)) + torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut) + ret = fut.wait() + return ret + + +class MyScriptModuleWithRRefs(torch.jit.ScriptModule): + def __init__(self, dst_worker): + super().__init__() + self.rrefs = [] + for _ in range(4): + self.rrefs.append(rpc_return_rref(dst_worker)) + + @torch.jit.script_method + def forward(self) -> Tensor: + res_tensor = torch.ones(2, 2) + for rref in self.rrefs: + res_tensor += rref.to_here() + + return res_tensor + + +@torch.jit.ignore +def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]: + return rref_var + + +@torch.jit.script +def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor: + return rref_python_annotation(rref_var).to_here() + + +class RRefTypingTest: + @dist_init + def test_rref_as_arg_and_return(self): + n = self.rank + 1 + dst_rank = n % self.world_size + local_ret = one_arg(torch.ones(2, 2)) + + # create rref on current rank + rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)) + + # pass rref to another user in rpc call + ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(ret, local_ret) + + # return rref in rpc call + rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,)) + self.assertEqual(rref1.to_here(), local_ret) + + # pass rref to another user in remote call + rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,)) + self.assertEqual(rref2.to_here(), local_ret) + + # return rref in remote call + rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,)) + self.assertEqual(rref3.to_here().to_here(), local_ret) + + @dist_init + def test_my_script_module_with_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank)) + res = module_with_rrefs() + self.assertEqual(res, torch.ones(2, 2) * 9) + + @dist_init + def test_rref_python_annotation(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + + res = rref_script_annotation(rref_var) + self.assertEqual(res, torch.ones(2, 2) + 1) + + +class FutureTypingTest: + @dist_init + def test_future_passed_between_python_and_jit(self): + dst_rank = (self.rank + 1) % self.world_size + inputs = (torch.tensor([1, 1]), torch.tensor([2, 2])) + ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs) + expected_res = torch.tensor([10, 10]) + + @torch.jit.script + def future_wait_in_script(fut: Future[Tensor]) -> Tensor: + return fut.wait() + + self.assertEqual(future_wait_in_script(ret_fut), expected_res) + + @torch.jit.script + def future_return_to_python( + dst_rank: int, inputs: Tuple[Tensor, Tensor] + ) -> Future[Tensor]: + return rpc.rpc_async( + f"worker{dst_rank}", two_args_two_kwargs, inputs + ) + + fut_res = future_return_to_python(dst_rank, inputs) + self.assertEqual(fut_res.wait(), expected_res) + + @dist_init + def test_future_python_annotation(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + input_0 = torch.ones(2, 2) + input_1 = 1 + expected_res = torch.add(input_0, input_1) + + @torch.jit.ignore + def python_return_future() -> Future[Tensor]: + fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {}) + return fut + + @torch.jit.script + def script_use_future() -> Tensor: + fut = python_return_future() + return fut.wait() + + res = script_use_future() + 
self.assertEqual(res, expected_res) + + +@torch.jit.script +class MyScriptClass: + def __init__(self, a: int): + self.a = a + + def get_value(self) -> int: + return self.a + + +@torch.jit.interface +class MyModuleInterface(torch.nn.Module): + def forward(self) -> Tensor: + # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well + pass + + +class MyScriptModule(torch.jit.ScriptModule): + def __init__(self, rank): + super().__init__() + self.a = torch.ones(rank) + + @torch.jit.script_method + def forward(self) -> Tensor: + return self.a + + @torch.jit.script_method + def custom_func(self) -> Tensor: + return self.a + + +def owner_create_rref_my_script_class(a): + return rpc.RRef(MyScriptClass(a)) + + +def owner_create_rref_my_script_module(a): + return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface) + + +@torch.jit.script +def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int: + return rref.to_here().get_value() + + +@torch.jit.script +def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor: + return rref.to_here().forward() + + +class LocalRRefTest: + @dist_init + def test_create_local_script_class_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. + rref_script_class = rpc.RRef(MyScriptClass(self.rank)) + ret = rref_script_class.to_here().get_value() + self.assertEqual(ret, self.rank) + + @dist_init + def test_create_local_script_module_rref_in_py(self): + if self.rank != 0: + return + + # Create a local RRef. + rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface) + ret = rref_script_module.to_here().forward() + self.assertEqual(ret, torch.ones(self.rank)) + + # Create a local RRef without type hint. + with self.assertRaisesRegex( + RuntimeError, + ( + "The RRef being created contains a ScriptModule, " + "must provide its ModuleInterface type hint." + ), + ): + rref_script_module = rpc.RRef(MyScriptModule(self.rank)) + + @dist_init + def test_return_local_script_class_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner(), script_rref_get_value_my_script_class, args, kwargs + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. + ret = use_rref_on_owner(rref) + self.assertEqual(ret, self.rank) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, self.rank) + + @dist_init + def test_return_local_script_module_rref_in_py_and_use_in_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Create a local RRef remotely in Python. + rref = rpc.rpc_sync( + dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,) + ) + + def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor: + args = (rref,) + kwargs: Dict[str, Any] = {} + fut = rpc.rpc_async( + rref.owner_name(), + script_rref_run_forward_my_script_module, + args, + kwargs, + ) + ret = fut.wait() + return ret + + # Use RRef in local Python RPC and remote Script run. 
+ ret = use_rref_on_owner(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + # Use RRef in local Script RPC and remote Script run. + use_rref_on_owner_script = torch.jit.script(use_rref_on_owner) + ret = use_rref_on_owner_script(rref) + self.assertEqual(ret, torch.ones(self.rank)) + + +def python_function(): + return 0 + + +@torch.jit.script +def two_args_two_kwargs( + first_arg, + second_arg, + first_kwarg=torch.tensor([3, 3]), + second_kwarg=torch.tensor([4, 4]), +): + return first_arg + second_arg + first_kwarg + second_kwarg + + +@torch.jit.script +def assorted_types_args_kwargs( + tensor_arg: Tensor, # noqa: E999 + str_arg: str, + int_arg: int, + tensor_kwarg: Tensor = torch.tensor([2, 2]), + str_kwarg: str = "str_kwarg", + int_kwarg: int = 2, +): + return tensor_arg + tensor_kwarg, str_arg + str_kwarg, int_arg + int_kwarg + + +@torch.jit.script +def raise_script(): + raise RuntimeError("Expected error") + + +@torch.jit.script +def script_rpc_async_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + +@torch.jit.script +def script_rpc_sync_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs) + return res + +@torch.jit.script +def script_rpc_remote_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs) + return rref_res.to_here() + +class JitRpcOpTest: + # Call functions remotely from Script. + @dist_init + def test_all_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([10, 10])) + + @dist_init + def test_some_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"first_kwarg": torch.tensor([2, 2])} + + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([9, 9])) + + @dist_init + def test_no_kwargs_are_populated_by_defaults(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]: + ret = script_op( + dst_worker_name, args, kwargs + ) + self.assertEqual(ret, torch.tensor([8, 8])) + + @dist_init + def test_args_and_kwargs_contain_different_types(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_with_assorted_types( + dst_worker_name: str, + ): + args = (torch.tensor([1, 1]), "str_arg", 1) + # Must annotate the value type as `Any`, because JIT type inference + # does not support multiple types when defining a Dict. 
+ # The error JIT gives is, + # "Dict values must contain only a single type, " + # "expected: Tensor but found str instead." + kwargs: Dict[str, Any] = { + "tensor_kwarg": torch.tensor([3, 3]), + "str_kwarg": "_str_kwarg", + "int_kwarg": 3, + } + fut = rpc.rpc_async( + dst_worker_name, assorted_types_args_kwargs, args, kwargs + ) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_with_assorted_types( + dst_worker_name + ) + self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4)) + + @dist_init + def test_kwargs_not_passed(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_without_kwargs_passed( + dst_worker_name: str, + ): + args = () + fut = rpc.rpc_async(dst_worker_name, no_arg, args) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_without_kwargs_passed( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_args_kwargs_are_neither_passed(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def script_rpc_async_call_without_args_kwargs_passed( + dst_worker_name: str, + ): + fut = rpc.rpc_async(dst_worker_name, no_arg) + ret = fut.wait() + return ret + + ret = script_rpc_async_call_without_args_kwargs_passed( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_less_than_needed_args_are_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, args matching happens during scripting. + with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"): + + @torch.jit.script + def script_rpc_async_call_with_less_args( + dst_worker_name: str, # noqa: E999 + ): + args = (torch.tensor([1, 1]),) + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + @dist_init + def test_more_than_needed_args_are_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, args matching happens during scripting. + with self.assertRaisesRegex( + RuntimeError, + "Expected at most 4 arguments but found 5 positional arguments", + ): + + @torch.jit.script + def script_rpc_async_call_with_more_args( + dst_worker_name: str, + ): + args = ( + torch.tensor([1, 1]), + torch.tensor([2, 2]), + torch.tensor([3, 3]), + torch.tensor([4, 4]), + torch.tensor([5, 5]), + ) + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + @dist_init + def test_unexepected_kwarg_is_specified(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, kwargs matching happens during execution. 
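+        # (unlike positional-arg matching, which fails at scripting time in the two tests
+        # above), so the scripted function below compiles fine and the error surfaces only
+        # when the RPC actually runs.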
+ @torch.jit.script + def script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name: str, # noqa: E999 + ): + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {"third_kwarg": torch.tensor([1, 1])} + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "Unknown keyword argument 'third_kwarg'" + ): + ret = script_rpc_async_call_with_unexpected_kwarg( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_call_python_function_remotely_from_script_not_supported(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function" + ): + ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name) + self.assertEqual(ret, 0) + + @dist_init + def test_call_script_function_that_raises_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + # Notice, TorchScript always translates(emits) Python `raise` statement, + # as the exception message string, "Exception", + # no matter what exception type and exception message are in the statement, + @torch.jit.script + def rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex(RuntimeError, "Expected error"): + ret = rpc_async_call_remote_raising_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + @dist_init + def test_call_script_function_that_not_exists_remotely_from_script(self): + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + @torch.jit.script + def nonexisting_script(): + return 0 + + @torch.jit.script + def rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name: str, + ): + args = () + kwargs = {} + fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs) + ret = fut.wait() + return ret + + with self.assertRaisesRegex( + RuntimeError, "attempted to get undefined function nonexisting_script" + ): + ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript( + dst_worker_name + ) + self.assertEqual(ret, 0) + + +@torch.jit.ignore +def my_script_module_init(rank: int) -> MyModuleInterface: + return MyScriptModule(rank) + + +@torch.jit.script +def construct_my_script_module(rank: int) -> MyModuleInterface: + return my_script_module_init(rank) + + +@torch.jit.script +def run_ref_script_module( + ref_script_module: RRef[MyModuleInterface], t: Tensor +) -> Tensor: + module = ref_script_module.to_here() + return module.forward() + t + + +@torch.jit.script +def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool: + return rref.confirmed_by_owner() + + +@torch.jit.script +def save_rref(rref_var: RRef[Tensor], fname: str) -> None: + torch.save(rref_var, fname) + + +@torch.jit.script +def script_add(x: Tensor, y: Tensor) -> Tensor: + return x + y + + +@rpc.functions.async_execution +@torch.jit.script +def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]: + return 
rpc.rpc_async(to, script_add, (x, y)) + + +@rpc.functions.async_execution +@torch.jit.script +def async_wrong_type() -> Tensor: + return torch.zeros(2) + + +def load_script_module_with_pickled_rref(pickled_script_module): + f = io.BytesIO(pickled_script_module) + m = torch.jit.load(f) + return m() + + +class JitRpcTest( + RRefAPITest, + RRefTypingTest, + LocalRRefTest, + JitRpcOpTest, + FutureTypingTest, + RpcAgentTestFixture, +): + @dist_init + def test_torchscript_function(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + local_ret = one_arg(torch.ones(2, 2)) + ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),)) + self.assertEqual(ret, local_ret) + rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),)) + self.assertEqual(rref.to_here(), local_ret) + # create rref to itself + local_rref = rpc.remote( + worker_name(self.rank), one_arg, args=(torch.ones(2, 2),) + ) + self.assertEqual(local_rref.to_here(), local_ret) + + @dist_init + def test_torchscript_function_exception(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"): + ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20)) + + with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"): + rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20)) + + @dist_init + def test_torchscript_functions_not_supported(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + my_local_script_module = MyScriptModule(self.rank) + + # It is not thread safe to instantiate MyScriptModule in multiple threads, + # wait for local MyScriptModule instantiation to finish, + # otherwise it could instantiate MyScriptModule in parallel with + # server thread in the below + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + # rpc_sync still accepts script class and run it in + # the same code path as python call. + ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,)) + + # rpc_sync does not accept script module method. + # Python 3.5 and Python 3.6 throw different error message, the only + # common word can be greped is "pickle". + with self.assertRaisesRegex(TypeError, "pickle"): + ret = rpc.rpc_async( + dst_worker_name, my_local_script_module.forward, args=() + ) + + @dist_init + def test_remote_script_module(self): + # TODO, need more investigation + # there is rref leak when shutting down, suspect it is because + # ref as arg is passed to pybind boundary, and the ref is not garbage + # collected by python when calling shutdown() + import torch.distributed.rpc.api as api + + api._ignore_rref_leak = True + + local_ret = torch.ones(self.rank) + torch.ones(self.rank) + + n = self.rank + 1 + dst_rank = n % self.world_size + remote_ref = rpc.remote( + worker_name(dst_rank), construct_my_script_module, args=(self.rank,) + ) + + # pass rref arg to owner + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_ref_script_module, + args=(remote_ref, torch.ones(self.rank)), + ) + self.assertEqual(ret, local_ret) + + # pass rref arg to self/user + with self.assertRaisesRegex( + RuntimeError, + "is an RRef to a ScriptModule. 
It can't be sent through RPC from owner,", + ): + ret = rpc.rpc_sync( + worker_name(self.rank), + run_ref_script_module, + args=(remote_ref, torch.ones(self.rank)), + ) + + @dist_init + def test_create_script_module_on_remote(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + # Construct on remote end with rpc_sync + created_script_module = rpc.rpc_sync( + dst_name, MyScriptModule, args=(self.rank,) + ) + # Forward should output a ones tensor of self.rank. + self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = created_script_module() + self.assertEqual(torch.ones(self.rank), rank_ones_tensor) + + # Construct ScriptModule with rpc.remote. + remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,)) + # Verify it is an instance of ScriptModule on remote end. + remote_end_is_script = rpc.rpc_sync( + remote_script_module.owner(), + rref_isinstance, + args=(remote_script_module, torch.jit.ScriptModule), + ) + self.assertTrue(remote_end_is_script) + # Run forward pass remotely. + remote_forward_output = remote_script_module.rpc_sync().forward() + self.assertEqual(remote_forward_output, torch.ones(self.rank)) + # Run function defined on ScriptModule remotely. + remote_func_output = remote_script_module.rpc_sync().custom_func() + self.assertEqual(remote_func_output, torch.ones(self.rank)) + # Ensure we can transfer ScriptModule RRef to this rank and run + # forward pass. + local_script_module = remote_script_module.to_here() + self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule)) + rank_ones_tensor = local_script_module() + self.assertEqual(rank_ones_tensor, torch.ones(self.rank)) + local_script_func_output = local_script_module.custom_func() + self.assertEqual(local_script_func_output, torch.ones(self.rank)) + + @dist_init + def test_load_script_module_with_pickled_rref(self): + dst_name = worker_name((self.rank + 1) % self.world_size) + m1 = MyScriptModuleWithRRefs(dst_name) + m2 = MyScriptModuleWithRRefs(dst_name) + + f = io.BytesIO() + + rpc._enable_jit_rref_pickle() + torch.jit.save(m1, f) + rpc._disable_jit_rref_pickle() + + out1 = rpc.rpc_sync( + dst_name, + load_script_module_with_pickled_rref, + args=(f.getvalue(),) + ) + out2 = m2() + self.assertEqual(out1, out2) + + @dist_init + def test_rref_jit_pickle_not_supported(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_var = rpc_return_rref(worker_name(dst_rank)) + with TemporaryFileName() as fname: + with self.assertRaisesRegex( + RuntimeError, "RRef jit pickling is only allowed inside RPC calls" + ): + save_rref(rref_var, fname) + + @dist_init + def test_remote_script_throw(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_raise_func, + args=(torch.ones(2),), + ) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_remote_script_udf(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2) * 2) + + @dist_init + def test_async_script_udf(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + self.assertEqual(future.wait(), torch.ones(2) * 2) + + @dist_init + def test_callback_simple(self): + def callback(fut): + return fut.wait() + 1 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, 
+ args=(torch.ones(2),), + ).then(callback) + self.assertEqual(future.wait(), torch.ones(2) * 2 + 1) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_add_done_callback(self): + callback_called = None + + def callback(fut): + nonlocal callback_called + callback_called = fut.wait() * 2 + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_udf, + args=(torch.ones(2),), + ) + + future.add_done_callback(callback) + future_then = future.then(lambda _: True) + + self.assertEqual(future.wait(), torch.ones(2) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. + # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + future_then.wait() + self.assertEqual(callback_called, torch.ones(2) * 4) + + @dist_init + def test_async_script_throw(self): + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + future.wait() + + @dist_init + def test_callback_with_exception(self): + def callback(fut): + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + fut.wait() + raise RuntimeError("Another expected error") + + future = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + script_fork_wait_throw, + args=(torch.ones(2),), + ).then(callback) + + with self.assertRaisesRegex(RuntimeError, "Another expected error"): + future.wait() + + @dist_init + def test_call_rpc_with_profiling(self): + # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit + # future from within a script function that calls rpc_async + if self.rank == 0: + with _profile() as prof: + prof_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(one_arg), + "worker0", + "worker1", + ) + with torch.autograd.profiler.record_function(prof_key) as rf: + ret = call_rpc_with_profiling(rf.record, "worker1") + # TODO: Can't get a reliable time for this profiling event since + # it's hard to estimate the execution time on the remote end for non-UDFs. + # This can be resolved by https://github.com/pytorch/pytorch/issues/36272. + # After that, this test should be modified to validate the function time. + events = prof.function_events + function_event = get_function_event(events, prof_key) + self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name) + + @dist_init + def test_rpc_async_jit_profiled(self): + # Tests that rpc_async calls made from within a TorchScript function are + # profiled. 
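+ # The assertions below look for two things in the caller-side trace: the
+ # RPCExecMode.ASYNC_JIT profiling key built for the scripted rpc_async call,
+ # and remote events (e.g. aten::add) recorded on the destination rank that
+ # reappear under that key with the "#remote_op: " prefix.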
+ if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = {} + with _profile() as prof: + script_rpc_async_call( + dst_worker_name, args, kwargs + ) + + # Ensure rpc_async call is profiled + function_events = prof.function_events + qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs) + rpc_async_jit_event = [ + event + for event in function_events + if qual_name in event.name and event.node_id == self.rank + ] + self.assertEqual(len(rpc_async_jit_event), 1) + rpc_async_jit_event = rpc_async_jit_event[0] + profiled_name = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + qual_name, + worker_name(self.rank), + dst_worker_name, + ) + self.assertEqual(profiled_name, rpc_async_jit_event.name) + remote_events = [event for event in function_events if event.is_remote] + # All remote events should have taken place on dst_rank + remote_event_node_ids = { + remote_event.node_id for remote_event in remote_events + } + self.assertEqual(remote_event_node_ids, {dst_rank}) + # script_rpc_async_call invokes add operator + # so we should see this as a remote event. + remote_add = next( + remote_event + for remote_event in remote_events + if "aten::add" in remote_event.name + ) + remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add" + self.assertEqual(remote_add.name, remote_add_profiled_name) + + @dist_init + def test_record_function_on_caller_rpc_async(self): + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + # Runs 2 rpc_async calls within JIT under record_function. + record_function_on_caller_rpc_async(dst_worker_name, block_scope) + + # Ensure record_function event is profiled. + function_events = prof.function_events + record_function_scope_event = [ + event for event in function_events if event.name == block_scope + ] + self.assertEqual(1, len(record_function_scope_event)) + record_function_scope_event = record_function_scope_event[0] + # Ensure RPC future is profiled. + expected_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + torch._jit_internal._qualified_name(script_add_ones), + worker_name(self.rank), + dst_worker_name, + ) + jit_rpc_events = [ + event for event in function_events if event.name == expected_key + ] + self.assertEqual(2, len(jit_rpc_events)) + # Validate that the record_function scope time is greater than both + # of the individual RPC async call times. The reason it is not necessarily + # greater than the sum is because the two can execute in parallel. + for jit_rpc_event in jit_rpc_events: + self.assertTrue( + record_function_scope_event.cpu_time_total + > jit_rpc_event.cpu_time_total + ) + + @dist_init + def test_rpc_torchscript_record_function(self): + # tests that torchscript functions can be profiled using with + # record_function(...) over RPC. + REMOTE_OP_STR = "#remote_op: " + if self.rank == 0: + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + block_scope = "foo" + with _profile() as prof: + call_rpc_torchscript_with_record_function(dst_worker_name, block_scope) + + # Need to call below to populate CPU children. 
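+ # (key_averages() post-processes the recorded events so that each
+ # FunctionEvent's cpu_children list is filled in before remote_children is
+ # inspected below.)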
+ prof.key_averages() + function_events = prof.function_events + expected_key = ( + _build_rpc_profiling_key( + RPCExecMode.ASYNC_JIT, + torch._jit_internal._qualified_name( + script_add_ones_with_record_function + ), + worker_name(self.rank), + dst_worker_name, + ) + + REMOTE_OP_STR + + block_scope + ) + remote_record_function_event = next( + evt for evt in function_events if evt.name == expected_key + ) + self.assertTrue(block_scope in remote_record_function_event.name) + remote_children = remote_record_function_event.cpu_children + self.assertTrue("aten::add" in child.name for child in remote_children) + + def test_record_function_jit_end_callbacks_with_fork(self): + # Ensures that we can call rf._call_end_callbacks_on_future on a jit + # future in python eager mode with torch.jit.fork + sleep_interval = 1 + with _profile() as prof: + with torch.autograd.profiler.record_function("foo") as rf: + fut = torch.jit._fork(sleep, sleep_interval) + rf._call_end_callbacks_on_future(fut) + fut.wait() + + function_events = prof.function_events + sleep_event = get_function_event(function_events, "foo") + self.assertEqual(sleep_event.name, "foo") + # Validate that callbacks were fired at the right time by checking the + # profiling event cpu time + self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval) + + def test_call_fork_in_jit_with_profiling(self): + # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit + # future from within a script function with torch.jit.fork + with _profile() as prof: + with torch.autograd.profiler.record_function("foo") as rf: + ret = call_fork_with_profiling(rf.record) + + events = prof.function_events + function_event = get_function_event(events, "foo") + self.assertEqual(function_event.name, "foo") + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2)) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_async_function_wrong_return_type(self): + with self.assertRaisesRegex( + RuntimeError, + "Async functions must return an IValue of Future type, but got Tensor", + ): + rpc.rpc_sync( + worker_name((self.rank + 1) % self.world_size), async_wrong_type + ) + + @dist_init + def test_async_function_wrong_decorator_order(self): + # @torch.jit.script complains about undefined value rpc. Error is shown + # below. The reason for not checking error string is to avoid making + # JIT error handling code depend on RPC tests, as we don't have any + # restrictions on the error message here. 
+ # + # RuntimeError: + # undefined value rpc: + # def async_wrong_decorator_order(to, x, y): + # # type: (str, Tensor, Tensor) -> Future[Tensor] + # return rpc.rpc_async(to, script_add, (x, y)) + # ~~~ <--- HERE + with self.assertRaises(RuntimeError): + + @torch.jit.script + @rpc.functions.async_execution + def async_wrong_decorator_order( + to: str, x: Tensor, y: Tensor + ) -> Future[Tensor]: + return rpc.rpc_async(to, script_add, (x, y)) + + @dist_init + def test_async_function_remote(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + rref = rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2)) + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_async_function_remote_multi(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + rrefs = [] + for i in range(num): + rrefs.append( + rpc.remote( + dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i) + ) + ) + + for i in range(num): + self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), async_wrong_type + ) + + with self.assertRaisesRegex( + RuntimeError, + "Async functions must return an IValue of Future type, but got Tensor", + ): + rref.to_here() diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py new file mode 100644 index 0000000000000000000000000000000000000000..0e49793634197e845969c745083d2532ab2988f4 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py @@ -0,0 +1,218 @@ +# mypy: allow-untyped-defs + +from typing import Dict, Tuple + +import torch +import torch.distributed.rpc as rpc +from torch import Tensor +from torch.distributed.rpc import RRef +from torch.testing._internal.dist_utils import ( + dist_init, + worker_name, + wait_until_pending_futures_and_users_flushed +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) + + +@torch.jit.script +def two_args_two_kwargs( + first_arg, + second_arg, + first_kwarg=torch.tensor([3, 3]), + second_kwarg=torch.tensor([4, 4]), +): + return first_arg + second_arg + first_kwarg + second_kwarg + + +@torch.jit.script +def script_rpc_async_call( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + ret = fut.wait() + return ret + + +@torch.jit.script +def rpc_async_call_with_timeout( + dst_worker_name: str, + args: Tuple[Tensor, Tensor], + kwargs: Dict[str, Tensor], + timeout: float, +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout) + ret = fut.wait() + return ret + + +@torch.jit.script +def rpc_async_call_with_timeout_future_ret( + dst_worker_name: str, + args: Tuple[Tensor, Tensor], + kwargs: Dict[str, Tensor], + timeout: float, +): + fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout) + return fut + + +@torch.jit.script +def rpc_async_call_future_ret( + dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor] +): + fut = 
rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs) + return fut + +@torch.jit.script +def rref_to_here(rref_var: RRef[Tensor]) -> Tensor: + return rref_var.to_here() + +@torch.jit.script +def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor: + return rref_var.to_here(timeout) + +@torch.jit.script +def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor: + fut = rpc.rpc_async(dst_worker_name, rref_to_here, args) + ret = fut.wait() + return ret + + +class JitFaultyAgentRpcTest(RpcAgentTestFixture): + """ + Run tests for rpc_async in JIT under the faulty agent test fixture to test + arbitrary timeouts. + """ + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_timeout_in_torchscript_function(self): + # Call rpc_async + fut.wait() in torchscript function and ensure that + # timeout is raised. + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + expected_error = self.get_timeout_error_regex() + # Ensure that we get a timeout if we override the default timeout and + # the RPC takes longer to execute. + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5) + + # Ensure that we timeout if we don't specify a timeout but the default + # is less than the RPC takes to execute. + rpc._set_rpc_timeout(0.001) + with self.assertRaisesRegex(RuntimeError, expected_error): + script_rpc_async_call( + dst_worker_name, args, kwargs + ) + + # Ensure that we run to completion if zero timeout is specified. + ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0) + self.assertEqual(ret, torch.tensor([8, 8])) + # reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5}) + def test_timeout_in_python(self): + # Ensures timeouts are raised if we call rpc_async from within a + # torchscript function, but wait on the future in python. + if self.rank != 0: + return + + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + args = (torch.tensor([1, 1]), torch.tensor([2, 2])) + kwargs = { + "first_kwarg": torch.tensor([2, 2]), + "second_kwarg": torch.tensor([3, 3]), + } + expected_error = self.get_timeout_error_regex() + + fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure timeout if we don't specify but the default is less than the + # RPC takes to execute. + rpc._set_rpc_timeout(0.001) + fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if zero timeout is specified + fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0) + result = fut.wait() + self.assertEqual(result, torch.tensor([8, 8])) + # reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_remote_timeout_to_here_in_jit(self): + # Test that calling to_here() in JIT will raise timeout error if + # rpc.remote failed. 
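+ # With SCRIPT_REMOTE_CALL listed in faulty_messages, the faulty agent fails the
+ # remote-creation message, so RRef creation on the owner is expected to fail;
+ # wait_until_pending_futures_and_users_flushed() then lets the resulting error
+ # propagate before to_here() is invoked from TorchScript.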
+ if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Call to_here() within a ScriptFunction and ensure it raises + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref_to_here(rref) + + @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1}) + def test_rref_to_here_timeout_in_jit(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref_to_here_with_timeout(rref, 0.01) + + rref_to_here_with_timeout(rref, 100) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_rref_timeout_pickle_in_jit(self): + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. + wait_until_pending_futures_and_users_flushed() + # Call RPC with RRef arg in JIT, which will go through JIT pickling and + # ensure error is raised. + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc_async_with_rref_arg(dst_worker, (rref, )) + + @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"]) + def test_rref_timeout_pickle_script_func(self): + # Similar to above test, but calls python rpc with script function. + if self.rank != 0: + return + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + rref = rpc.remote( + dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)) + ) + # Will ensure error handling callbacks are run. 
+ wait_until_pending_futures_and_users_flushed() + # Call RPC with script function that takes RRef, ensure timeout during pickling + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, )) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..3a684b73d2f315a00465371fad3050a795251ddb --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py @@ -0,0 +1,63 @@ +# mypy: allow-untyped-defs + +import os +from abc import ABC, abstractmethod + +import torch.testing._internal.dist_utils + + +class RpcAgentTestFixture(ABC): + @property + def world_size(self) -> int: + return 4 + + @property + def init_method(self): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + master_addr = os.environ["MASTER_ADDR"] + master_port = os.environ["MASTER_PORT"] + return f"tcp://{master_addr}:{master_port}" + else: + return self.file_init_method + + @property + def file_init_method(self): + return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format( + file_name=self.file_name + ) + + @property + @abstractmethod + def rpc_backend(self): + pass + + @property + @abstractmethod + def rpc_backend_options(self): + pass + + def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027 + """Method used by dist_init to prepare the faulty agent. + + Does nothing for other agents. + """ + + # Shutdown sequence is not well defined, so we may see any of the following + # errors when running tests that simulate errors via a shutdown on the + # remote end. + @abstractmethod + def get_shutdown_error_regex(self): + """ + Return various error message we may see from RPC agents while running + tests that check for failures. This function is used to match against + possible errors to ensure failures were raised properly. + """ + + @abstractmethod + def get_timeout_error_regex(self): + """ + Returns a partial string indicating the error we should receive when an + RPC has timed out. Useful for use with assertRaisesRegex() to ensure we + have the right errors during timeout. 
+ """ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..413f97d94eb28115b22e4572b5b04a98f1d40a68 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -0,0 +1,6495 @@ +# mypy: allow-untyped-defs + +import concurrent.futures +import contextlib +import json +import os +import sys +import threading +import time + +from collections import namedtuple +from functools import partial +from threading import Event +from threading import Lock +from unittest import mock + +import torch +import torch.nn as nn +import torch.distributed as dist +import torch.distributed.rpc as rpc +import torch.distributed.autograd as dist_autograd +from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo +from torch.distributed.rpc.api import _use_rpc_pickler, _thread_local_var, _wait_all +from torch.distributed.rpc.internal import ( + PythonUDF, + RPCExecMode, + _internal_rpc_pickler, + _build_rpc_profiling_key, +) +from torch.futures import Future +from torch.testing._internal.common_distributed import ( + skip_if_lt_x_gpu, + captured_output, + tp_transports, +) +from torch.testing._internal.common_utils import ( + IS_MACOS, + load_tests, + skip_but_pass_in_sandcastle_if, + get_cycles_per_ms, +) + +from torch.testing._internal.dist_utils import ( + dist_init, + get_function_event, + initialize_pg, + wait_until_node_failure, + wait_until_pending_futures_and_users_flushed, + wait_until_owners_and_forks_on_rank, + worker_name, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_utils import TemporaryFileName + +from torch.autograd.profiler_legacy import profile as _profile +import operator + + +def foo_add(): + return torch.add(torch.ones(1), torch.ones(1)) + +def udf_with_torch_ops(device=-1, use_record_function=False): + device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device) + record_function_ctx = ( + torch.autograd.profiler.record_function("##forward##") + if use_record_function + else contextlib.nullcontext() + ) + with device_ctx, record_function_ctx: + t1, t2 = torch.ones(1), torch.ones(1) + t = torch.add(t1, t2) + t = torch.mul(t, t) + t = t.relu() + t = t.sigmoid() + +# Events (operator invocations) that are expected to be ran as part of the above +# function. +EXPECTED_REMOTE_EVENTS = [ + "aten::ones", + "aten::ones", + "aten::add", + "aten::mul", + "aten::relu", + "aten::clamp_min", + "aten::sigmoid", +] + +# Remote operations are prefixed with the following string for RPC profiling. +REMOTE_OP_STR = "#remote_op: " + + +VALUE_FUTURE = concurrent.futures.Future() +DONE_FUTURE = concurrent.futures.Future() + +FIFTY_MIL_CYCLES = 50000000 + +_rpc_barrier_count = 0 + +def _increment_count(): + global _rpc_barrier_count + _rpc_barrier_count += 1 + +def _reset_count(): + global _rpc_barrier_count + _rpc_barrier_count = 0 + +class StubRpcAgent: + def __init__(self, world_size): + self.world_size = world_size + + def get_worker_infos(self): + return { + WorkerInfo(name=worker_name(rank), id=rank) + for rank in range(self.world_size) + } + + +def _stub_construct_rpc_backend_options_handler(**kwargs): + return mock.Mock() # RpcBackendOptions. 
+ + +def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options): + return StubRpcAgent(world_size=world_size) + + +def set_value(value): + VALUE_FUTURE.set_result(value) + + +def wait_for_value_future(): + return VALUE_FUTURE.result() + + +def set_and_check_done(value): + VALUE_FUTURE.set_result(value) + return DONE_FUTURE.result() + + +# it is used to test python user defined function over rpc +# classes and functions are used to test python user defined class and +# methods over rpc +TensorClass = namedtuple("TensorClass", ["tensors"]) + +class MyPickleClass: + def __init__(self) -> None: + self.t = None + + def __getstate__(self): + (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize( + PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None) + ) + return (pickled_python_udf, tensors) + + def __setstate__(self, obj): + python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1]) + result = python_udf.func(python_udf.args[0], python_udf.args[1]) + self.t = result + + def set(self, val): + self.t = val + + +class SlowPickleClass: + def __init__(self, t): + self.t = t + + def __getstate__(self): + time.sleep(self.t) + return (self.t, ) + + def __setstate__(self, obj): + self.t = obj[0] + time.sleep(self.t) + + +class MyClass: + def __init__(self, a, delay=False): + self.a = a + # delay initialization to simulate errors if specified + if delay: + time.sleep(2) + + def my_instance_method(self, b): + return self.a + b + + @classmethod + def my_class_method(cls, d, e): + return d + e + + @staticmethod + def my_static_method(f): + return f > 10 + + def increment_value(self, increment): + self.a += increment + + def get_value(self): + return self.a + + def my_slow_method(self, my_tensor_arg): + time.sleep(5) + return torch.add(self.a, my_tensor_arg) + + +def _call_method_on_rref(method, rref, *args, **kwargs): + return method(rref.local_value(), *args, **kwargs) + + +def get_rref_list(values): + return [RRef(MyClass(a)) for a in values] + + +def add_rref_to_value(rref, value): + return rref.to_here() + value + + +def run_nested_pickle(pickle_cls_instance, tensor): + return pickle_cls_instance.t + tensor + +def build_sparse_tensor(coalesce=False): + i = [[0, 1, 1], [2, 0, 2]] + v = [3, 4, 5] + tensor = torch.sparse_coo_tensor(i, v, (2, 3)) + if coalesce: + tensor = tensor.coalesce() + return tensor + +def build_complex_tensors(): + a = torch.ones(3, 3) + b = [a, a] + c = [b, b] + d = [a, b] + e = {a: d} + return [a, b, c, d, e] + +def non_cont_test(t_view, t_cont): + if t_view.is_contiguous(): + raise Exception('t_view is contiguous!') # noqa: TRY002 + if not t_cont.is_contiguous(): + raise Exception('t_cont is not contiguous!') # noqa: TRY002 + if not torch.equal(t_view, t_cont): + raise Exception('t_view is not equal to t_cont!') # noqa: TRY002 + return t_view + +def my_function(a, b, c): + return a + b + c + + +def my_tensor_function(a, b): + return a + b + +def my_container_sum(a): + result = a[0] + for tensor in a[1:]: + result += tensor + return result + + +def my_sleep_func(seconds=1): + time.sleep(seconds) + return torch.mul(torch.tensor(1), torch.tensor(1)) + + +def my_complex_tensor_function(list_input, tensor_class_input, dict_input): + res = list_input[0] + for t in list_input: + res += t + for v in dict_input.values(): + res += v + complex_tensors = tensor_class_input.tensors + return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2]) + + +def my_rref_function(rref_a, rref_b): + return rref_a.to_here() + 
rref_b.to_here() + + +def delayed_add(a, b, seconds=0.05): + time.sleep(seconds) + return a + b + + +def identity(a): + return a + +def no_result(): + print("do nothing") + +def raise_or_inc(value): + if value.numel() == 2: + raise ValueError("Expected error") + return value + 1 + +def nested_rpc(dst): + return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def nested_rpc_sparse(dst): + return rpc.rpc_sync( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ) + + +def multi_layer_nested_async_rpc(dst, world_size, ttl): + # this method returns immediately without blocking the callee, but will + # generate additional requests. + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + rpc.rpc_async( + current_dst, + multi_layer_nested_async_rpc, + args=(next_dst, world_size, ttl - 1), + ) + return 0 + + +def nested_rref(dst): + return ( + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)), + rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)), + ) + + +def nested_rref_sparse(dst): + return ( + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + rpc.remote( + dst, + torch.add, + args=(build_sparse_tensor(), build_sparse_tensor()) + ), + ) + + +def nested_remote(dst): + rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3)) + return rref.to_here() + +def nested_remote_sparse(dst): + rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())) + return rref.to_here() + + +def rref_forward_chain(dst, world_size, rref, ttl): + if ttl > 0: + current_dst = worker_name(dst) + next_dst = (dst + 1) % world_size + ret_rref = rpc.remote( + current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1) + ) + return [ret_rref] + else: + return rref.to_here() + + +def rpc_return_rref(dst): + return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)) + + +def light_rpc(): + return 0 + + +def heavy_rpc(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +def heavy_rpc_sparse(tensor): + for i in range(1, 100): + tensor *= i + tensor = tensor / (i + 1) + return 0 + +@torch.jit.script +def heavy_rpc_torchscript(tensor): + for i in range(1, 100): + tensor *= i + tensor /= i + 1 + return 0 + + +@torch.jit.script +def my_script_func(tensor): + return torch.add(tensor, tensor) + + +expected_err = "Expected error" + +# Note that it needs to inherit from Exception, not BaseException. 
See comment +# in rpc/internal.py +class CustomException(Exception): + def __init__(self, bool, msg): + self.bool = bool + super().__init__(msg) + +def raise_func(): + raise ValueError(expected_err) + +def custom_raise_func(): + raise CustomException(True, "foo") + +@torch.jit.script +def raise_func_script(expected_err: str) -> torch.Tensor: + raise ValueError(expected_err) + +expected_err_escape = "\nFirst line of error \n next line of error \n last line of error" +def raise_func_escape(): + raise ValueError(expected_err_escape) + + +global_rref = None + + +def set_global_rref(rref): + global global_rref + global_rref = rref + + +def clear_global_rref(): + global global_rref + global_rref = None + + +def check_rref_confirmed(rref): + return rref.confirmed_by_owner() + + +def get_rref_debug_info(): + return _rref_context_get_debug_info() + + +def add_use_future_cb(to, x, y, z): + out = concurrent.futures.Future() + + def callback(fut): + out.set_result(fut.wait() + z) + + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(callback) + return out.result() + + +def get_events_from_profile(profile_rref): + return profile_rref.local_value().process_global_function_events + + +def add_use_future_set_result(to, x, y, z): + out = torch.futures.Future() + fut = rpc.rpc_async(to, torch.add, args=(x, y)) + fut.then(lambda fut : out.set_result(fut.wait() + z)) + return out.wait() + + +def add_use_future_nested_cb(to, x, y, z): + out = torch.futures.Future() + + def callback(fut1): + fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z)) + fut2.then(lambda fut2 : out.set_result(fut2.wait())) + + fut1 = rpc.rpc_async(to, torch.add, args=(x, y)) + fut1.then(callback) + return out.wait() + + +def fail_on_fut(fut): + pass + + +@rpc.functions.async_execution +def async_raise_func(): + raise RuntimeError("Expected error") + + +@rpc.functions.async_execution +def async_wrong_type(): + return torch.zeros(2, 2) + + +@rpc.functions.async_execution +def async_add(to, x, y): + return rpc.rpc_async(to, torch.add, args=(x, y)) + + +def slow_add(x, y, device="cpu"): + time.sleep(1) + x = x.to(device) + y = y.to(device) + return torch.add(x, y).cpu() + + +@rpc.functions.async_execution +def slow_async_add(to, x, y, device="cpu"): + return rpc.rpc_async(to, slow_add, args=(x, y, device)) + + +@rpc.functions.async_execution +def async_add_with_future_ctor(to, x, y, z): + fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut1: fut.set_result(fut1.wait() + z) + ) + return fut + + +@rpc.functions.async_execution +def async_add_chained(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_chained_multi(to, x, num, step): + fut = rpc.rpc_async(to, torch.add, args=(x, 0)) + for _ in range(num): + fut = fut.then(lambda fut: fut.wait() + step) + return fut + + +@rpc.functions.async_execution +def async_add_nested(to, x, y, z): + return rpc.rpc_async(to, async_add, args=(to, x, y)).then( + lambda fut: fut.wait() + z + ) + + +@rpc.functions.async_execution +def async_add_multi_fanout(to, x, num, step): + futs = [] + for i in range(num): + if i == 0: + futs.append(rpc.rpc_async(to, torch.add, args=(x, step))) + else: + futs.append(rpc.rpc_async(to, torch.add, args=(0, step))) + + # TODO: use torch.futures.collect_all + lock = Lock() + state = {"cnt": 0, "ret": torch.zeros_like(x)} + ret_future = torch.futures.Future() + + def inc_and_set(fut): + with lock: + state["cnt"] += 1 + 
state["ret"] += fut.wait() + if state["cnt"] >= len(futs): + ret_future.set_result(state["ret"]) + + for fut in futs: + fut.then(inc_and_set) + + return ret_future + + +@rpc.functions.async_execution +def async_cuda_sleep_and_set_to_one(t): + device = t.device + original_stream = torch.cuda.current_stream(device) + new_stream = torch.cuda.Stream(device) + new_stream.wait_stream(original_stream) + with torch.cuda.stream(new_stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + t.fill_(1) + fut = Future(devices=[device]) + fut.set_result(t) + return fut + + +@rpc.functions.async_execution +def async_cuda_nested_add(to, x, y, z): + def cb(fut): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + return fut.value() + z + + return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb) + + +# A custom Python class that contains a tensor, needed to see if we correctly +# use the Python pickler to extract tensors from non-IValue-convertible types. +class TensorWrapper: + __slots__ = ("tensor", "lock", "event", "thread") + + def __init__(self, t): + self.tensor = t + # Add one non-picklable field, to ensure it's ignored/skipped. + self.lock = Lock() + self.event = torch.cuda.Event(enable_timing=True) + self.thread = threading.Thread() + self.thread.start() + + def increase(self, v): + with self.lock: + self.tensor += v + + def sum(self): + with self.lock: + self.event.record() + return self.tensor.sum() + + +class AsyncExecutionClass: + + @staticmethod + @rpc.functions.async_execution + def static_async_add(to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + @classmethod + @rpc.functions.async_execution + def class_async_add(cls, to, x, y, z): + ret_fut = torch.futures.Future() + rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: ret_fut.set_result(fut.wait() + z) + ) + return ret_fut + + @rpc.functions.async_execution + def bound_async_add(self, to, x, y, z): + return rpc.rpc_async(to, torch.add, args=(x, y)).then( + lambda fut: fut.wait() + z + ) + + +def return_future(): + return torch.futures.Future() + + +class FooBackendOptions(rpc.RpcBackendOptions): + def __init__(self, init_method): + # Must call the __init__ of the superclass (and do so directly, + # without using super()) because... pybind. + rpc.RpcBackendOptions.__init__(self) + self.init_method = init_method + + +# load_tests from common_utils is used to automatically filter tests for +# sharding on sandcastle. 
This line silences flake warnings +load_tests = load_tests + + +class MyEmbeddingBagModel(torch.nn.Module): + def __init__(self, sparse): + super().__init__() + self.eb = torch.nn.EmbeddingBag( + 10, + 10, + sparse=sparse + ) + + def forward(self, x): + return self.eb(x) + + +class MyParameterServer: + def __init__(self, trainers): + self.lock = Lock() + self.trainers = trainers + self.iteration = 0 + self.updates = 0 + self.futures = [] + self.total = None + self.gradient = None + + @staticmethod + def get_gradient(rref): + return rref.local_value().gradient + + @staticmethod + @rpc.functions.async_execution + def average(rref, riteration, tensor): + self = rref.local_value() + fut = torch.futures.Future() + with self.lock: + if riteration > self.iteration: + self.iteration = riteration + self.updates = 0 + self.futures.clear() + self.futures.append(fut) + if self.total is None: + self.total = tensor + else: + self.total += tensor + self.updates += 1 + if self.trainers == self.updates: + self.gradient = self.total / float(self.trainers) + for fut in self.futures: + result = self.total / float(self.trainers) + fut.set_result(result) + return fut + + +class MyConvNetForMNIST(nn.Module): + def __init__(self, device): + super().__init__() + self.net = nn.Sequential( + nn.Conv2d(1, 16, 3, 1), + nn.ReLU(), + nn.Conv2d(16, 32, 3, 1), + nn.ReLU(), + nn.MaxPool2d(2), + nn.Flatten(1), + nn.Linear(4608, 128), + nn.ReLU(), + nn.Linear(128, 10), + ).to(device) + self.device = device + + def forward(self, x, is_rref=False): + x = x.to_here() if is_rref else x + with torch.cuda.stream(torch.cuda.current_stream(self.device)): + # intentionally adding delay to current CUDA stream + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + return self.net(x) + + def __getstate__(self): + # return an empty dict to avoid inspecting the model contents on the + # owner + return {} + + +class RpcTestCommon: + def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None): + if mode == RPCExecMode.SYNC: + return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs) + elif mode == RPCExecMode.ASYNC: + return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait() + elif mode == RPCExecMode.REMOTE: + return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here() + + def _self_py_udf_remote(self, worker_info, x, y, z): + rref = rpc.remote(worker_info, my_function, args=(x, y, z)) + self.assertEqual(rref.to_here(), x + y + z) + + def _self_remote_rref_as_rpc_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x)) + ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y)) + self.assertEqual(ret, x + y + z + x + y) + self.assertEqual(fut.wait(), x + y + z + x) + + def _self_remote_rref_as_remote_arg(self, dst, x, y, z): + self_worker_info = rpc.get_worker_info() + rref = rpc.remote(self_worker_info, my_function, args=(x, y, z)) + ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x)) + self.assertEqual( + ret_rref.to_here(), x + y + z + x + ) + + def _world_size_one(self, a, b): + if self.rank == 0: + rpc.init_rpc( + name="me", + backend=self.rpc_backend, + rank=0, + world_size=1, + rpc_backend_options=self.rpc_backend_options, + ) + + def _rpc_sync(x, y): + expect = x * 2 + result = rpc.rpc_sync( + "me", + my_tensor_function, + args=(x, y) + ) + self.assertEqual(expect, result) + + def _rpc_async(x, y): + expect = x * 2 + result = rpc.rpc_async( + "me", + my_tensor_function, + args=(x, 
y) + ).wait() + self.assertEqual(expect, result) + + def _remote(x, y): + expect = x * 2 + result = rpc.remote( + "me", + my_tensor_function, + args=(x, y) + ).to_here() + self.assertEqual(expect, result) + + _rpc_sync(a, b) + _rpc_async(a, b) + _remote(a, b) + + rpc.shutdown() + + def _multi_rpc(self, sparse): + dst_rank = (self.rank + 1) % self.world_size + for i in range(20): + n = i + self.rank + 1 + if sparse: + x = build_sparse_tensor() * n + y = build_sparse_tensor() * n + else: + x = torch.ones(2, 2) + y = torch.ones(2, 2) + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(ret, x * 2) + + def _run_uneven_workload(self, f, x, num_repeat=30): + # worker0 drives and waits for worker1 and worker2 + # throughout the test. + if self.rank == 0: + self.assertTrue(self.world_size >= 3) + + # Phase 1: Only worker1 has workload. + dst = "worker1" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for fut in torch.futures.collect_all(futs).wait(): + self.assertEqual(fut.wait(), 0) + + # Phase 2: Only worker2 has workload. + # If join is not correctly implemented, + # worker2 should be closed by now. + dst = "worker2" + futs = [] + for _ in range(num_repeat): + fut = rpc.rpc_async(dst, f, args=(x,)) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + + def _wait_all_workers(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _wait_all_workers_twice(self, f, x): + initialize_pg(self.file_init_method, self.rank, self.world_size) + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + self._run_uneven_workload(f, x) + + # worker0 calls this at the end after waiting for RPC responses. + # worker1/2 calls this immediately and has some works after it. + # worker3 calls this immediately and has no more work. + rpc.api._wait_all_workers() + rpc.api._wait_all_workers() + + # Wait before proceeding to shutdown to ensure worker0 RPCs make + # it through to other workers. + dist.barrier() + rpc.shutdown(graceful=False) + + def _nested_rpc(self, f, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + f, + args=(worker_name(self.rank),), + ) + self.assertEqual(ret, expected) + + def _stress_test_rpc(self, f, repeat=1000, args=()): + n = self.rank + 1 + dst_rank = n % self.world_size + futs = [] + tik = time.time() + for _ in range(repeat): + fut = rpc.rpc_async(worker_name(dst_rank), f, args=args) + futs.append(fut) + + for val in torch.futures.wait_all(futs): + self.assertEqual(val, 0) + tok = time.time() + print( + f"Rank {self.rank} finished testing {repeat} times in {tok - tik} seconds." 
+ ) + + def _builtin_remote_ret(self, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.to_here(), expected) + + def _builtin_remote_self(self, x, y, expected): + rref = rpc.remote( + worker_name(self.rank), + torch.add, + args=(x, y), + ) + self.assertEqual(rref.local_value(), expected) + + def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}): + m = 10 + n = self.rank + 1 + dst_rank = n % self.world_size + rrefs = [] + expected = [] + for i in range(m): + n = n + i + rrefs.append( + rpc.remote( + worker_name(dst_rank), + fn, + args=args_fn(n, sparse), + kwargs=kwargs_fn(n, sparse), + ) + ) + expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse))) + + for i in range(m): + self.assertEqual(rrefs[i].to_here(), expected[i]) + + def _py_rref_args(self, a, b, x, y, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(a, b) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(x, y) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rref_args_user_share(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + owner_rank = n % self.world_size + user_rank = (n + 1) % self.world_size + rref_a = rpc.remote( + worker_name(owner_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(owner_rank), my_function, args=(x, y, z) + ) + rref_c = rpc.remote( + worker_name(user_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), expected) + + def _py_rpc_rref_args(self, a, b, c, x, y, z, expected): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), my_function, args=(a, b, c) + ) + rref_b = rpc.remote( + worker_name(dst_rank), my_function, args=(x, y, z) + ) + + c = rpc.rpc_sync( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(c, expected) + + def _nested_remote(self, f, expected): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + + rref = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), expected) + + def _nested_rref(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref_of_rrefs = rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + + # Say C has 2 OwnerRRefs. + # B has 2 UserRRefs to those 2 OwnerRRefs, respectively. + # This call is effectively A asking B to share its 2 UserRRefs. 
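+ # (Here A is this worker, B is the worker at dst_rank1 that ran f and holds the
+ # two UserRRefs, and C is the worker at dst_rank2 that owns the tensors; the
+ # to_here() calls below then fetch the actual values from C.)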
+ rrefs = rref_of_rrefs.to_here() + + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _nested_rref_stress(self, f, expected1, expected2): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + all_rrefs = [] + for _ in range(20): + all_rrefs.append( + rpc.remote( + worker_name(dst_rank1), + f, + args=(worker_name(dst_rank2),), + ) + ) + + for i in range(20): + rref_of_rrefs = all_rrefs[i] + rrefs = rref_of_rrefs.to_here() + self.assertEqual(len(rrefs), 2) + self.assertEqual(rrefs[0].to_here(), expected1) + self.assertEqual(rrefs[1].to_here(), expected2) + + def _trainer_func(self, rref, sparse): + m = MyEmbeddingBagModel(sparse=sparse) + loss_fn = nn.MSELoss() + for i in range(10): + outputs = m(torch.rand(10, 10).long()) + loss_fn(outputs, torch.rand(10, 10)).backward() + gradient = next(iter(m.parameters())).grad + fut = rref.rpc_async().average(rref, i, gradient) + gradient = fut.wait() + if gradient.is_sparse: + gradient = gradient.to_dense().double() + ps_gradient = rref.rpc_sync().get_gradient(rref) + if ps_gradient.is_sparse: + ps_gradient = ps_gradient.to_dense().double() + self.assertTrue(torch.equal(gradient, ps_gradient)) + + def _my_parameter_server(self, sparse): + ps_rref = RRef(MyParameterServer(self.world_size - 1)) + futures = [] + for index in range(1, self.world_size): + futures.append( + rpc.rpc_async( + worker_name((self.rank + index) % self.world_size), + self._trainer_func, + args=( + ps_rref, + sparse + ), + ) + ) + torch.futures.wait_all(futures) + + def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor): + # We check proper CUDA stream synchronization by adding to the tensor + # in one stream to get the expected value, and reading it from another stream. 
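+ # Because the Future is constructed with devices=["cuda:0"], waiting on it
+ # should make the consumer's current stream wait for the producer's CUDA work
+ # (including the long cuda._sleep) before the result is compared.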
+ future = Future(devices=["cuda:0"]) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + another_stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + if sparse_tensor: + tensor = build_sparse_tensor().to("cuda:0") + add_tensor = build_sparse_tensor().to("cuda:0") + expected_tensor = (tensor + add_tensor).coalesce() + else: + tensor = torch.zeros((100,), device="cuda:0") + add_tensor = torch.ones((100,), device="cuda:0") + expected_tensor = tensor + add_tensor + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor += add_tensor + if sparse_tensor: + tensor = tensor.coalesce() + future.set_result(wrapper(tensor)) + with torch.cuda.stream(another_stream): + tensor = unwrapper(future.wait()) + if sparse_tensor: + self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item()) + self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item()) + self.assertEqual(tensor.size(), expected_tensor.size()) + else: + self.assertTrue(torch.eq(tensor, expected_tensor).all().item()) + + +class RpcTest(RpcAgentTestFixture, RpcTestCommon): + @dist_init + def test_worker_id(self): + n = self.rank + 1 + peer_rank = n % self.world_size + self_worker_info = rpc.get_worker_info() + peer_worker_info = rpc.get_worker_info(worker_name(peer_rank)) + + self.assertEqual(self_worker_info.name, worker_name(self.rank)) + self.assertEqual(peer_worker_info.name, worker_name(peer_rank)) + + with self.assertRaisesRegex(RuntimeError, "could not find destination"): + unknown_worker_id = rpc.get_worker_info("WorkerUnknown") + + @dist_init + def test_get_worker_infos(self): + worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos() + + worker_names = {worker_info.name for worker_info in worker_infos} + expected_worker_names = { + worker_name(rank) for rank in range(self.world_size) + } + self.assertEqual(worker_names, expected_worker_names) + + worker_ids = {worker_info.id for worker_info in worker_infos} + expected_worker_ids = set(range(self.world_size)) + self.assertEqual(worker_ids, expected_worker_ids) + + @dist_init + def test_self_add(self): + self_worker_info = rpc.get_worker_info() + self_worker_name = worker_name(self.rank) + fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + @dist_init + def test_send_to_rank(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test dense tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test invalid ranks + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(RuntimeError): + self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + with 
self.assertRaises(ValueError): + self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1)) + + @dist_init + def test_self_py_udf_remote(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_remote_arg(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + torch.ones(2, 2), + 1, + 3 + ) + + @dist_init + def test_rref_proxy_non_exist(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + msg = "has no attribute 'non_exist'" + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_sync().non_exist() + + with self.assertRaisesRegex(AttributeError, msg): + rref.rpc_async().non_exist().wait() + + with self.assertRaisesRegex(AttributeError, msg): + rref.remote().non_exist() + + def _test_rref_proxy_tensor(self, dst): + rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3)) + + expected = torch.ones(2, 2) + 1 + 3 + self.assertEqual(expected.size(), rref.rpc_sync().size()) + self.assertEqual(expected + 1, rref.rpc_async().add(1).wait()) + self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here()) + + @dist_init + def test_rref_proxy_tensor(self): + self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_tensor_self(self): + self._test_rref_proxy_tensor(rpc.get_worker_info()) + + @dist_init + def test_rref_proxy_reuse(self): + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + my_function, + args=(torch.ones(2, 2), 1, 3) + ) + expected = torch.ones(2, 2) + 1 + 3 + + proxy_rpc_sync = rref.rpc_sync() + proxy_rpc_async = rref.rpc_async() + proxy_remote = rref.remote() + + self.assertEqual(expected.size(), proxy_rpc_sync.size()) + self.assertEqual(expected + 1, proxy_rpc_sync.add(1)) + self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4)) + + self.assertEqual(expected.size(), proxy_rpc_async.size().wait()) + self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait()) + self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait()) + + self.assertEqual(expected.size(), proxy_remote.size().to_here()) + self.assertEqual(expected + 5, proxy_remote.add(5).to_here()) + self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here()) + + def _test_rref_proxy_class(self, dst): + rref = rpc.remote(dst, MyClass, args=(7,)) + expected = MyClass(7) + self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + expected.increment_value(3) + self.assertEqual(None, rref.rpc_sync().increment_value(1)) + self.assertEqual(None, rref.rpc_async().increment_value(1).wait()) + self.assertEqual(None, rref.remote().increment_value(1).to_here()) + + 
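+ # The three remote increment_value(1) calls above add 3 in total, matching the + # local increment_value(3), so remote and local state should agree again below.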
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value()) + self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait()) + self.assertEqual(expected.get_value(), rref.remote().get_value().to_here()) + + self.assertEqual( + expected.my_instance_method(2), + rref.rpc_sync().my_instance_method(2) + ) + self.assertEqual( + expected.my_instance_method(3), + rref.rpc_async().my_instance_method(3).wait() + ) + self.assertEqual( + expected.my_instance_method(4), + rref.remote().my_instance_method(4).to_here() + ) + + self.assertEqual( + expected.my_static_method(9), + rref.rpc_sync().my_static_method(9) + ) + self.assertEqual( + expected.my_static_method(10), + rref.rpc_async().my_static_method(10).wait() + ) + self.assertEqual( + expected.my_static_method(11), + rref.remote().my_static_method(11).to_here() + ) + + self.assertEqual( + expected.my_class_method(2, torch.zeros(2, 2)), + rref.rpc_sync().my_class_method(2, torch.zeros(2, 2)) + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(3, 3)), + rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait() + ) + self.assertEqual( + expected.my_class_method(2, torch.ones(4, 4)), + rref.remote().my_class_method(2, torch.ones(4, 4)).to_here() + ) + + @dist_init + def test_rref_proxy_class(self): + self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size)) + + @dist_init + def test_rref_proxy_class_self(self): + self._test_rref_proxy_class(rpc.get_worker_info()) + + @mock.patch.object(torch.distributed.autograd, "_init") + @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent") + @dist_init(setup_rpc=False) + def test_register_rpc_backend_and_set_and_start_rpc_backend( + self, mock_rpc_agent, mock_dist_autograd_init + ): + backend_name = "stub_backend" + + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + with self.assertRaisesRegex( + RuntimeError, "^RPC backend .+: already registered$" + ): + backend = rpc.backend_registry.register_backend( + backend_name, + _stub_construct_rpc_backend_options_handler, + _stub_init_rpc_backend_handler, + ) + + rpc.init_rpc( + name="worker1", + backend=backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + store, _, _ = next( + torch.distributed.rendezvous( + self.init_method, rank=self.rank, world_size=self.world_size + ) + ) + rpc._init_rpc_backend( + backend=self.rpc_backend, + store=store, + name="duplicate_name", + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_duplicate_name_2(self): + with self.assertRaisesRegex(RuntimeError, "is not unique"): + rpc.init_rpc( + name=worker_name(self.rank % (self.world_size - 1)), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_reinit(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. 
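+ # The barrier guarantees every rank has finished rpc.init_rpc before any rank + # attempts the second, failing init_rpc below.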
+ dist.barrier() + + # TODO: with TCP init, rank 0 raises Address already in use because + # rank 0 is the start daemon and the store is created before checking if + # RPC is already initialized in init_rpc. + if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0: + expected_reinit_err = "Address already in use" + else: + expected_reinit_err = "is already initialized" + + with self.assertRaisesRegex(RuntimeError, expected_reinit_err): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_pg_init_no_rpc_init(self): + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = torch.nn.Linear(3, 4) + + def forward(self, x): + return self.lin(x) + + model = MyModel() + model.train() + model = torch.nn.parallel.DistributedDataParallel(model) + + with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'): + params = [] + for param in model.parameters(): + params.append(RRef(param)) + + def test_world_size_one(self): + self._world_size_one( + torch.ones(2, 2), + torch.ones(2, 2) + ) + + @dist_init(setup_rpc=False) + def test_invalid_names(self): + + worker_id = 0 + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo("abc*", worker_id) + + with self.assertRaisesRegex(RuntimeError, "Worker name must match"): + info = WorkerInfo(" ", worker_id) + + with self.assertRaisesRegex(RuntimeError, "must be non-empty"): + info = WorkerInfo("", worker_id) + + # If the number in the message does not match, it is likely that the + # value of MAX_NAME_LEN in RPC WorkerInfo has changed. 
+ with self.assertRaisesRegex(RuntimeError, "shorter than 128"): + info = WorkerInfo("".join(["a" for i in range(500)]), worker_id) + + # Test that WorkerInfo can be pickled and sent in RPC call + @dist_init + def test_worker_info_pickle(self): + dst_rank = (self.rank + 1) % self.world_size + worker_info = rpc.api.get_worker_info() + ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,)) + self.assertEqual(ret, worker_info) + + @dist_init + def test_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @staticmethod + def return_callee_id(): + return rpc.get_worker_info().id + + @dist_init + def test_int_callee(self): + dst_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id) + self.assertEqual(ret, dst_rank) + + @dist_init + def test_add_with_id(self): + n = self.rank + 1 + dst_rank = n % self.world_size + worker_info = rpc.get_worker_info(worker_name(dst_rank)) + + ret = rpc.rpc_sync( + worker_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n)) + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + + @dist_init + def test_scalar_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n) + ) + self.assertEqual(ret, (torch.ones(n, n) + n)) + + @dist_init + def test_async_add(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_nonzero(self): + n = self.rank + 1 + dst_rank = n % self.world_size + x = torch.ones(self.world_size, self.world_size) + x[self.rank][self.rank] = 0 + ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,)) + self.assertEqual(ret, x.nonzero()) + + @dist_init + def test_multi_rpc(self): + self._multi_rpc(False) + + @dist_init + def test_future_wait_twice(self): + dst = worker_name((self.rank + 1) % self.world_size) + futs = [] + for i in range(20): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + torch.futures.wait_all(futs) + + for fut in futs: + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + @dist_init(setup_rpc=False) + def test_wait_all_workers_timeout(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + og_func = rpc.api._wait_all_workers + + def wait_all_workers_sleep(timeout): + rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout) + + rpc.api._wait_all_workers = wait_all_workers_sleep + + try: + with self.assertRaisesRegex(RuntimeError, ''): + rpc.shutdown(graceful=True, timeout=0.01) + finally: + rpc.api._wait_all_workers = og_func + dist.barrier() + + def test_wait_all_workers_dense(self): + self._wait_all_workers(heavy_rpc, torch.ones(100, 100)) + + def test_wait_all_workers_twice_dense(self): + self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100)) + + @dist_init + def test_all_gather(self): + info = rpc.get_worker_info() + results = rpc.api._all_gather(info.id) + expected = {} + for info in 
rpc._get_current_rpc_agent().get_worker_infos(): + expected[info.name] = info.id + + self.assertEqual(expected, results) + + @dist_init + def test_all_gather_timeout(self): + rpc._set_rpc_timeout(0.1) + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, + "timed out in _all_gather after 0\\.10 seconds" + ): + rpc.api._all_gather(SlowPickleClass(0.5)) + else: + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.api._all_gather(SlowPickleClass(0.5)) + + def _test_barrier_helper(self, info, names, multi_threaded=False): + names = sorted(names) + leader = names[0] + rpc.rpc_sync(leader, _reset_count) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, 0) + rpc.api._barrier(names) + rpc.rpc_sync(leader, _increment_count) + rpc.api._barrier(names) + if not multi_threaded and info.name == leader: + self.assertEqual(_rpc_barrier_count, len(names)) + + @dist_init + def test_rpc_barrier_all(self): + # Test rpc barrier when called with full list of workers + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_subset(self): + # Test rpc barrier when processes are called with different subsets of the full list + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [worker.name for worker in all_worker_info if not worker.id % 2] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_partial_subset(self): + # Test rpc barrier when some processes are not involved in the barrier + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + if info.id % 2: + names = [worker.name for worker in all_worker_info if worker.id % 2] + else: + names = [f"worker{info.id}"] + self._test_barrier_helper(info, names) + + @dist_init + def test_rpc_barrier_multithreaded(self): + # This tests validates the implementation of barrier when multiple threads call into it + # We only need to check that it does not hang in this case + info = rpc.get_worker_info() + all_worker_info = rpc._get_current_rpc_agent().get_worker_infos() + names = [worker.name for worker in all_worker_info] + threads = [] + for _ in range(3): + th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True)) + threads.append(th) + th.start() + for th in threads: + th.join() + + @dist_init + def test_graceful_shutdown_with_uneven_workload(self): + """Test graceful termination.""" + self._run_uneven_workload(heavy_rpc, torch.ones(100, 100)) + + @dist_init(setup_rpc=False) + def test_shutdown_followed_by_rpc(self): + # Initialize RPC. 
+ rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, torch.ones(n, n) * 2) + rpc.shutdown() + + with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"): + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + + @dist_init + def test_expected_src(self): + dst_rank = (self.rank + 1) % self.world_size + expected_src_rank = (self.rank - 1) % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,)) + value = VALUE_FUTURE.result() + self.assertEqual(value, expected_src_rank) + + @dist_init + def test_py_built_in(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2)) + self.assertEqual(ret, min(n, n + 1, n + 2)) + + @dist_init + def test_py_user_defined(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(ret, my_function(n, n + 1, n + 2)) + + def test_build_rpc_profiling_key(self): + # Tests that the name that shows up as an Event in profiling RPCs has all + # the necessary information. + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + rpc_profiling_key = _build_rpc_profiling_key( + exec_mode, "foo", "worker0", "worker1" + ) + self.assertIn(exec_mode.value, rpc_profiling_key) + self.assertIn("foo", rpc_profiling_key) + self.assertIn("worker0", rpc_profiling_key) + self.assertIn("worker1", rpc_profiling_key) + + def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode): + self.assertTrue(self_worker_name in rpc_event.name) + self.assertTrue(dst_worker_name in rpc_event.name) + if isinstance(func, torch.jit.ScriptFunction): + self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name) + else: + self.assertTrue(func.__name__ in rpc_event.name) + self.assertTrue(rpc_exec_mode.value in rpc_event.name) + self.assertEqual(rpc_event.count, 1) + + @dist_init + def test_profiler_rpc_record_shapes(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + t1, t2 = torch.ones(100), torch.ones(100) + with _profile(record_shapes=True) as prof: + rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2)) + + function_events = prof.function_events + remote_events = [event for event in function_events if event.is_remote] + remote_add_event = next( + event for event in remote_events if "aten::add" in event.name + ) + remote_add_input_shapes = remote_add_event.input_shapes + # Run profiler on equivalent local op and validate shapes are the same. 
+ with _profile(record_shapes=True) as prof: + torch.add(t1, t2) + + local_function_events = prof.function_events + local_add_event = next( + event for event in local_function_events if "aten::add" in event.name + ) + local_add_input_shapes = local_add_event.input_shapes + self.assertEqual(remote_add_input_shapes, local_add_input_shapes) + + @dist_init + def test_profiler_rpc_memory(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile(profile_memory=True) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + # if cpu_memory_usage was not propagated over the wire, this set would + # only contain 0 (indicates no memory being profiled) + self.assertNotEqual({0}, event_cpu_mem_usages) + # No memory profiled if profile_memory=False + with _profile(profile_memory=False) as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + function_events = p.function_events + event_cpu_mem_usages = {event.cpu_memory_usage for event in function_events} + self.assertEqual({0}, event_cpu_mem_usages) + + @dist_init + def test_profiler_export_trace(self): + if self.rank != 1: + return + dst = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst) + with _profile() as p: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + res = fut.wait() + + events = p.function_events + with TemporaryFileName() as fname: + path = fname + p.export_chrome_trace(path) + with open(path) as f: + trace = json.load(f) + event_names = [event['name'] for event in trace] + for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]: + event_exists = any(expected_event_name in event_name for event_name in event_names) + self.assertTrue(event_exists) + + @dist_init + def test_profiler_rpc_key_names(self): + # tests that remote events are properly prefixed with the RPC profiling key. + if self.rank != 1: + return + + # Spawn multiple threads that send RPCs to ensure keys are correctly + # prefixed when there are multiple RPCs being created/in flight at the + # same time. + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + + def rpc_with_profiling(dst_worker): + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + fut.wait() + + events = prof.function_events + remote_event_names = { + event.name: event for event in events if event.is_remote + } + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + dst_worker, + ) + + remote_event_name_set = set(EXPECTED_REMOTE_EVENTS) + for name, event in remote_event_names.items(): + # Ensure that we have the expected key as part of the remote + # event. + self.assertTrue(name.startswith(rpc_profiling_key)) + self.assertTrue(event.is_remote) + self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id) + # Ensure that the remote event name also contains the operator. + operator_name_substr = name[len(rpc_profiling_key) :] + # Note: we don't assert that every remote event needs to be + # in the above set, the set is just a representative set of + # what we expect to see. The profiler can change and add more + # events, but we should always expect to see this representative + # set. 
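+ # Drop every expected event name found in this operator substring; anything left + # in remote_event_name_set afterwards was never seen in the remote profile.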
+ matching_event = { + remote_event_name + for remote_event_name in remote_event_name_set + if remote_event_name in operator_name_substr + } + remote_event_name_set -= matching_event + + # The set should be empty, otherwise its contained elements did + # not show up in the remote profiler output. + self.assertTrue( + remote_event_name_set == set(), + f"Expected {remote_event_name_set} to be included in remote profiler output.", + ) + + for dst in dst_ranks: + dst_worker = worker_name(dst) + num_parallel_rpcs = 2 + with concurrent.futures.ThreadPoolExecutor( + max_workers=num_parallel_rpcs + ) as executor: + futs = [ + executor.submit(rpc_with_profiling, dst_worker) + for _ in range(num_parallel_rpcs) + ] + # Wait for workers to finish test + for fut in futs: + fut.result() + + def _run_test_profiler_remote_events_profiled(self): + # Tests that we can successfully invoke the profiler on a remote node, + # and collect the remote events back in the local profiler. + if self.rank != 1: + return + + dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank] + for dst in dst_ranks: + dst_worker = worker_name(dst) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=()) + ret = fut.wait() + + events = prof.function_events + + rpc_event = get_function_event(events, RPCExecMode.ASYNC.value) + self.check_profiling_info( + worker_name(self.rank), + dst_worker, + udf_with_torch_ops, + rpc_event, + RPCExecMode.ASYNC, + ) + + remote_events = {event.name: event for event in events if event.is_remote} + rpc_profiling_key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + udf_with_torch_ops.__qualname__, + worker_name(self.rank), + worker_name(dst), + ) + + for expected_remote_event_name in EXPECTED_REMOTE_EVENTS: + expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name + self.assertTrue(expected_key in remote_events) + remote_event = remote_events[expected_key] + # Remote event should have a node ID corresponding to the worker + # it ran on. + self.assertEqual(remote_event.node_id, dst) + + # Validate order remote events show up in profiling output. 
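+ # convert_remote_to_local strips the RPC profiling key and the remote-op marker + # so event names can be compared directly against EXPECTED_REMOTE_EVENTS.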
+ def convert_remote_to_local(event_name): + remote_op_key = rpc_profiling_key + REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + + len(remote_op_key) : + ] + + remote_events_list = [ + convert_remote_to_local(event.name) + for event in events + if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS + ] + self.assertEqual( + set(remote_events_list), + set(EXPECTED_REMOTE_EVENTS), + f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}", + ) + + @dist_init + def test_profiler_remote_events_profiled(self): + self._run_test_profiler_remote_events_profiled() + + @dist_init + def test_profiler_remote_events_profiled_single_threaded(self): + self._run_test_profiler_remote_events_profiled() + + def run_profiling_workload(self, dst): + fut = rpc.rpc_async( + worker_name(dst), + torch.mul, + args=( + torch.tensor(1.0, requires_grad=True), + torch.tensor(1.0, requires_grad=True), + ), + ) + fut.wait() + + def _run_rpc_profiling_async_function(self, device="cpu"): + if self.rank != 1: + return + + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + with _profile() as prof: + ret = rpc.rpc_async( + dst1, slow_async_add, args=(dst2, x, y, device), timeout=20 + ) + out = ret.wait() + + function_events = prof.function_events + # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be + # recorded. + key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1 + ) + + nested_rpc_key_prefix = _build_rpc_profiling_key( + RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2 + ) + expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix + remote_events = [event for event in function_events if event.is_remote] + rpc_remote_event = [ + event for event in remote_events if event.name == expected_key + ] + self.assertEqual(1, len(rpc_remote_event)) + rpc_remote_event = rpc_remote_event[0] + self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size) + # slow_async_add's RPC does an add on dst2, which should be reflected as well. + remote_add_key = ( + expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add) + ) + remote_add_event = [ + event for event in remote_events if event.name == remote_add_key + ] + self.assertEqual(1, len(remote_add_event)) + remote_add_event = remote_add_event[0] + # Validate that node_id is dst2. + self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size) + + @dist_init + def test_rpc_profiling_async_function(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_async_function_single_threaded(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + self._run_rpc_profiling_async_function() + if torch.cuda.is_available(): + dist.barrier() + self._run_rpc_profiling_async_function(device="cuda:0") + + @dist_init + def test_rpc_profiling_remote_record_function(self): + # test that functions run over RPC with record_function show the expected + # profiled block. 
+ if self.rank != 1: + return + dst_ranks = [i for i in range(self.world_size) if i != self.rank] + for dst_rank in dst_ranks: + dst_worker = worker_name(dst_rank) + with _profile() as prof: + fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True)) + fut.wait() + + function_events = prof.function_events + record_function_remote_event = [ + evt for evt in function_events if "##forward##" in evt.name + ] + self.assertEqual(1, len(record_function_remote_event)) + record_function_remote_event = record_function_remote_event[0] + self.assertEqual(record_function_remote_event.node_id, dst_rank) + # cpu_children only returns direct children, so here we get all + # children recursively. + + def get_cpu_children(event): + if not event.cpu_children: + return [] + cpu_children = event.cpu_children + for e in event.cpu_children: + cpu_children.extend(get_cpu_children(e)) + return cpu_children + + remote_children = get_cpu_children(record_function_remote_event) + # Get local children and verify parity. + with _profile() as prof: + udf_with_torch_ops(-1, True) + + local_function_events = prof.function_events + local_record_function_event = next( + evt for evt in local_function_events if "##forward##" in evt.name + ) + local_children = get_cpu_children(local_record_function_event) + local_children_names = [ + evt.name for evt in local_children + ] + + REMOTE_OP_STR = "#remote_op: " + + def convert_remote_to_local(event_name): + remote_op_key = REMOTE_OP_STR + return event_name[ + event_name.find(remote_op_key) + len(remote_op_key) : + ] + + for evt in remote_children: + local_name = convert_remote_to_local(evt.name) + self.assertTrue(local_name in local_children_names) + + def validate_profiling_workload(self, dst, prof): + + def convert_remote_to_local(event_name): + return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :] + + events = prof.function_events + remote_events = { + convert_remote_to_local(event.name): event + for event in events + if event.is_remote + } + self.assertTrue("aten::mul" in remote_events) + remote_mul_event = remote_events["aten::mul"] + self.assertEqual(remote_mul_event.node_id, dst) + self.check_profiling_info( + worker_name(self.rank), + worker_name(dst), + torch.mul, + remote_mul_event, + RPCExecMode.ASYNC, + ) + + def _run_test_profiler_with_autograd_context(self): + dst = (self.rank + 1) % self.world_size + if self.rank == 1: + # Cases where we can double wrap messages with profiling information and autograd info. + with dist_autograd.context() as context_id: + with _profile() as prof: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + # Ensure that flipped order of ctx managers results in events being + # recorded as expected. + with _profile() as prof: + with dist_autograd.context() as context_id: + self.run_profiling_workload(dst) + + self.validate_profiling_workload(dst, prof) + + @dist_init + def test_profiler_with_autograd_context_single_threaded(self): + self._run_test_profiler_with_autograd_context() + + @dist_init + def test_profiler_with_autograd_context(self): + self._run_test_profiler_with_autograd_context() + + def _profiler_test_with_rpc( + self, rpc_exec_mode, func, args, use_record_function=False, dst=None, kineto_profile=False + ): + dst = dst if dst is not None else (self.rank + 1) % self.world_size + + # only run profiler on rank 1. 
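+ # When kineto_profile is set, torch.profiler.profile is used instead; RPC + # profiling is disabled in that mode, which the assertions below verify.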
+ p = _profile if not kineto_profile else torch.profiler.profile # kineto + if self.rank == 1: + with p() as prof: + record_function_ctx_mgr = ( + contextlib.nullcontext() + if not use_record_function + else torch.autograd.profiler.record_function( + "foo" + ) + ) + with record_function_ctx_mgr as rf: + if rpc_exec_mode == RPCExecMode.SYNC: + rpc.rpc_sync(worker_name(dst), func, args=args) + elif rpc_exec_mode == RPCExecMode.ASYNC: + fut = rpc.rpc_async(worker_name(dst), func, args=args) + if kineto_profile: + # Ensure multiple async RPCs don't cause issues. + # Would have raised + # "RuntimeError: Cannot call + # RemoteProfilerManager::setCurrentKey when current + # key is already set." error if RPC profiling was + # not disabled properly for kineto. + fut2 = rpc.rpc_async(worker_name(dst), func, args=args) + fut2.wait() + fut.wait() + else: + self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE) + rref = rpc.remote(worker_name(dst), func, args=args) + rref.to_here() + # To avoid flakiness, wait for the RRef to be profiled. This + # means that we received the acknowledgement of successful + # creation on the owner and ran the callbacks responsible + # for recording the profiling event. + rref._get_profiling_future().wait() + + events = prof.function_events if not kineto_profile else prof.events() + if kineto_profile: + # RPC profiling is disabled so there should be no rpc related + # events. + with self.assertRaises(IndexError): + get_function_event(events, rpc_exec_mode.value) + + return + + rpc_event = get_function_event(events, rpc_exec_mode.value) + # verify Node ID for this rpc event. + self.assertEqual(rpc_event.node_id, self.rank) + # Ensure recording of remote events. + remote_events = {event for event in events if event.node_id == dst} - {rpc_event} + self.assertGreaterEqual(len(remote_events), 1) + for remote_event in remote_events: + self.assertEqual(remote_event.node_id, dst) + + if use_record_function: + scope_event = get_function_event(events, "foo") + # Since RPC call is within the scope, its CPU interval should be + # contained within foo's interval. + self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start) + self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end) + # the sender, dest worker, function run, and type of RPC should all + # be recorded. + self_worker_name = worker_name(self.rank) + dst_worker_name = worker_name(dst) + self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode) + if use_record_function: + # verify order by ensuring that the outer context comes + # before the rpc event. 
+ foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name) + rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name) + self.assertLess(foo_event_ix, rpc_event_idx) + + def _run_test_profiler_with_sync_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,), + use_record_function=True) + + @dist_init + def test_profiler_with_sync_rpc_udf(self): + self._run_test_profiler_with_sync_rpc_udf() + + @dist_init + def test_profiler_with_sync_rpc_udf_single_threaded(self): + self._run_test_profiler_with_sync_rpc_udf() + + def _run_test_profiler_with_sync_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_sync_rpc_builtin(self): + self._run_test_profiler_with_sync_rpc_builtin() + + @dist_init + def test_profiler_with_sync_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_sync_rpc_builtin() + + def _run_test_profiler_with_async_rpc_udf(self): + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,), + use_record_function=True) + # Test to ensure that kineto profiler enabled in RPC does not enable + # RPC profiling (it is unsupported) and does not result in issues. + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_sleep_func, args=(1,), kineto_profile=True + ) + + @dist_init + def test_profiler_with_async_rpc_udf(self): + self._run_test_profiler_with_async_rpc_udf() + + @dist_init + def test_profiler_with_async_rpc_udf_single_threaded(self): + self._run_test_profiler_with_async_rpc_udf() + + def _run_test_profiler_with_async_rpc_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + + @dist_init + def test_profiler_with_async_rpc_builtin(self): + self._run_test_profiler_with_async_rpc_builtin() + + @dist_init + def test_profiler_with_async_rpc_builtin_single_threaded(self): + self._run_test_profiler_with_async_rpc_builtin() + + def _run_test_profiler_with_remote_udf(self): + self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,)) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank + ) + + @dist_init + def test_profiler_with_remote_udf(self): + self._run_test_profiler_with_remote_udf() + + @dist_init + def test_profiler_with_remote_udf_single_threaded(self): + self._run_test_profiler_with_remote_udf() + + def _run_test_profiler_with_remote_builtin(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)), + use_record_function=True + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + torch.mul, + args=(torch.ones(1), torch.ones(1)), + dst=self.rank, + ) + + @dist_init + def test_profiler_with_remote_builtin(self): + 
self._run_test_profiler_with_remote_builtin() + + @dist_init + def test_profiler_with_remote_builtin_single_threaded(self): + self._run_test_profiler_with_remote_builtin() + + def _run_test_profiler_with_script_async_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.ASYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_async_rpc(self): + self._run_test_profiler_with_script_async_rpc() + + @dist_init + def test_profiler_with_script_async_rpc_single_threaded(self): + self._run_test_profiler_with_script_async_rpc() + + def _run_test_profiler_with_script_sync_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.SYNC, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + + @dist_init + def test_profiler_with_script_sync_rpc(self): + self._run_test_profiler_with_script_sync_rpc() + + @dist_init + def test_profiler_with_script_sync_rpc_single_threaded(self): + self._run_test_profiler_with_script_sync_rpc() + + def _run_test_profiler_with_script_remote_rpc(self): + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),) + ) + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, + my_script_func, + args=(torch.tensor(1),), + use_record_function=True, + ) + # test remote to self + self._profiler_test_with_rpc( + RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank + ) + + @dist_init + def test_profiler_with_script_remote_rpc(self): + self._run_test_profiler_with_script_remote_rpc() + + @dist_init + def test_profiler_with_script_remote_rpc_single_threaded(self): + self._run_test_profiler_with_script_remote_rpc() + + def _assert_top_level_events(self, process_global_events, expected_top_level_event_names): + top_level_event_names = [] + for thread_local_events in process_global_events: + # Get top-level events from all events happened on a thread. 
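+ # An event is considered top level if it starts after every previously accepted + # event has ended; nested child events start before their parent finishes.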
+ last_end_time = 0 + for event in thread_local_events: + event_name = event.name + time_range = event.time_range + if time_range.start > last_end_time: + top_level_event_names.append(event_name) + last_end_time = time_range.end + top_level_event_names = sorted(top_level_event_names) + expected_top_level_event_names = sorted(expected_top_level_event_names) + self.assertEqual( + top_level_event_names, + expected_top_level_event_names, + f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}", + ) + + @dist_init + def test_server_process_global_profiler(self): + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker_name = worker_name(dst_rank) + + x = torch.tensor(1) + y = torch.tensor(2) + + outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + outer_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.add, (x, y)) + inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile) + inner_profile_rref.rpc_sync().__enter__() + rpc.rpc_sync(dst_worker_name, torch.sub, (x, y)) + inner_profile_rref.rpc_sync().__exit__(None, None, None) + outer_profile_rref.rpc_sync().__exit__(None, None, None) + + inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,)) + expected_inner_events = ['aten::sub'] + expected_outer_events = expected_inner_events + ['aten::add'] + + self._assert_top_level_events(inner_events, expected_inner_events) + outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,)) + self._assert_top_level_events(outer_events, expected_outer_events) + + inner_profile_rref.rpc_sync().key_averages() + outer_profile_rref.rpc_sync().key_averages() + + @dist_init + def test_async_record_function_double_end_callbacks(self): + num_sleep_seconds = 1 + if self.rank == 1: + # Validate that calling the function twice results in an error. + with _profile() as pf: + with torch.autograd.profiler.record_function("foo") as rf: + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + rf._call_end_callbacks_on_future(fut) + with self.assertRaisesRegex( + RuntimeError, "can only be called once." + ): + rf._call_end_callbacks_on_future(fut) + fut.wait() + + @dist_init + def test_async_record_function_legacy(self): + # Test the legacy _record_function ops work + # Note: These exist for backward compatibility with TorchScript + num_sleep_seconds = 1 + if self.rank == 1: + with _profile() as pf: + try: + handle = torch.ops.profiler._record_function_enter("foo", None) + fut = rpc.rpc_async( + worker_name(0), my_sleep_func, args=(num_sleep_seconds,) + ) + torch.ops.profiler._call_end_callbacks_on_jit_fut(handle, fut) + finally: + torch.ops.profiler._record_function_exit(handle) + + fut.wait() + + @dist_init + def test_async_record_function_cbs_jit_call(self): + if self.rank == 1: + with _profile() as pf: + key = _build_rpc_profiling_key( + RPCExecMode.ASYNC, + torch._jit_internal._qualified_name(my_script_func), + "worker1", + "worker0", + ) + with torch.autograd.profiler.record_function(key) as rf: + fut = rpc.rpc_async( + worker_name(0), my_script_func, args=(torch.tensor(1),) + ) + # Intentionally calling record_function internals + fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.record, fut) + result = fut.wait() + # Validate that the profiling future returns the same value as the RPC + # future. 
+ expected = torch.add(torch.tensor(1), torch.tensor(1)) + self.assertEqual(result, expected) + events = pf.function_events + rpc_event = get_function_event( + events, torch._jit_internal._qualified_name(my_script_func) + ) + self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name) + + @dist_init + def test_py_class_constructor(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,)) + self.assertEqual(ret.a, n) + + @dist_init + def test_py_class_instance_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,) + ) + self.assertEqual(ret, MyClass(2).my_instance_method(n)) + + @dist_init + def test_py_class_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1) + ) + self.assertEqual(ret, MyClass.my_class_method(n, n + 1)) + + @dist_init + def test_py_class_static_method(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,) + ) + self.assertEqual(ret, MyClass.my_static_method(n + 10)) + + @dist_init + def test_py_multi_async_call(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker_info = rpc.get_worker_info(worker_name(dst_rank)) + fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,)) + fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2)) + self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10)) + self.assertEqual(fut2.wait(), min(n, n + 1, n + 2)) + + @dist_init + def test_py_no_return_result(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync(worker_name(dst_rank), no_result) + self.assertEqual(ret, no_result()) + + @dist_init + def test_py_tensors(self): + n = self.rank + 1 + dst_rank = n % self.world_size + ret = rpc.rpc_sync( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n))) + + @dist_init + def test_py_tensors_multi_async_call(self): + futs = [] + n = self.rank + 1 + dst_rank = n % self.world_size + for i in range(100): + fut = rpc.rpc_async( + worker_name(dst_rank), + my_tensor_function, + args=(torch.ones(i, i), torch.ones(i, i)), + ) + futs.append(fut) + + j = 0 + for val in torch.futures.wait_all(futs): + self.assertEqual( + val, my_tensor_function(torch.ones(j, j), torch.ones(j, j)) + ) + j += 1 + + @dist_init + def test_py_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [torch.ones(n, n), torch.ones(n, n)] + b = TensorClass(build_complex_tensors()) + c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)} + ret = rpc.rpc_sync( + worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c) + ) + self.assertEqual(ret, my_complex_tensor_function(a, b, c)) + + @dist_init + def test_py_nested_pickle(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + ret = rpc.rpc_sync( + worker_name(dst_rank), + run_nested_pickle, + args=(MyPickleClass(), torch.ones(2, 2)), + ) + + m = MyPickleClass() + m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2))) + self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2))) + + @dist_init + def test_py_function_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + with 
self.assertRaises(TypeError): + ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,)) + + @dist_init + def test_py_raise_in_user_func(self): + with captured_output() as (_, err): + # This barrier prevents a race condition where the main thread has + # not entered the context manager when the remote function runs. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func) + with self.assertRaisesRegex(ValueError, expected_err): + fut.wait() + # This barrier prevents a race condition where the main thread exits the + # context manager before the remote function has run. + dist.barrier() + + # Validate that trainers log errors when running functions. + stderr_lines = err.getvalue() + self.assertTrue(expected_err in stderr_lines) + + @dist_init + def test_py_raise_in_user_func_escaped_str(self): + n = self.rank + 1 + dst_rank = n % self.world_size + fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape) + try: + fut.wait() + except ValueError as e: + msg = str(e) + # Ensure newlines are unescaped to provide a better repr of error. + self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape")) + else: + self.assertTrue(False, "expected raise_func_escape to raise ValueError.") + + @dist_init + def test_nested_rpc(self): + self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1) + + @dist_init + def test_stress_light_rpc(self): + self._stress_test_rpc(light_rpc) + + @dist_init + def test_stress_heavy_rpc(self): + self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_stress_heavy_rpc_torchscript(self): + self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),)) + + @dist_init + def test_builtin_remote_ret(self): + self._builtin_remote_ret( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @dist_init + def test_builtin_remote_self(self): + self._builtin_remote_self( + torch.ones(2, 2), + torch.ones(2, 2), + torch.ones(2, 2) * 2 + ) + + @staticmethod + def _multi_args_fn(n, sparse=False): + if sparse: + return (build_sparse_tensor(), build_sparse_tensor()) + else: + return (torch.ones(n, n), torch.ones(n, n)) + + @dist_init + def test_multi_builtin_remote_ret(self): + self._test_multi_remote_call( + torch.add, False, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_py_udf_remote(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + my_function, + kwargs={"a": n, "b": n + 1, "c": n + 2}, + ) + self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2)) + + @staticmethod + def _multi_kwargs_fn(n, sparse=False): + if sparse: + return { + "a": build_sparse_tensor(), + "b": build_sparse_tensor(), + "c": build_sparse_tensor() + } + else: + return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)} + + @dist_init + def test_multi_py_udf_remote(self): + self._test_multi_remote_call( + my_function, + False, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args(self): + self._py_rref_args( + torch.ones(2, 2), + 1, + torch.ones(2, 2), + 2, + torch.ones(2, 2) * 2 + 3) + + @dist_init + def test_py_rref_args_user_share(self): + self._py_rref_args_user_share( + torch.ones(2, 2), + 1, + 2, + torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_py_rpc_rref_args(self): + self._py_rpc_rref_args( + torch.ones(2, 2), + 1, + 2, + 
torch.ones(2, 2), + 3, + 4, + torch.ones(2, 2) * 2 + 10 + ) + + @dist_init + def test_nested_remote(self): + self._nested_remote( + nested_remote, + torch.ones(2, 2) + 3 + ) + + @dist_init + def test_nested_rref(self): + self._nested_rref( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_nested_rref_stress(self): + self._nested_rref_stress( + nested_rref, + torch.ones(2, 2) + 1, + torch.ones(2, 2) + 2 + ) + + @dist_init + def test_multi_layer_nested_async_rpc(self): + # This test will exit right away, but there will be a chain of async + # RPCs. The termination algorithm should detect those messages properly. + # Otherwise, some peer could exit early, leaving others to timeout + # errors or connection closed errors. + ttl = 20 + n = self.rank + 1 + dst_rank = n % self.world_size + + multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl) + + @dist_init + def test_remote_with_exception(self): + n = self.rank + 1 + dst_rank = n % self.world_size + # check ref to other workers + rref = rpc.remote(worker_name(dst_rank), raise_func) + with self.assertRaises(ValueError): + rref.to_here() + # check ref to itself + rref = rpc.remote(worker_name(self.rank), no_result, args=(10,)) + with self.assertRaises(TypeError): + rref.to_here() + + @dist_init + def test_rpc_return_rref(self): + n = self.rank + 1 + dst_rank1 = n % self.world_size + dst_rank2 = (n + 1) % self.world_size + rref = rpc.rpc_sync( + worker_name(dst_rank1), + rpc_return_rref, + args=(worker_name(dst_rank2),), + ) + self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1) + + @dist_init + def test_rref_forward_chain(self): + ttl = 8 + n = self.rank + 1 + dst_rank = n % self.world_size + + rref = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + + ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl) + + for i in range(ttl): + self.assertEqual(len(ret_rref), 1) + ret_rref = ret_rref[0].to_here() + + ret = ret_rref + self.assertEqual(ret, torch.add(torch.ones(n, n), 1)) + + @dist_init + def test_local_rref_no_fork(self): + local_rref = RRef(35) + self.assertEqual(local_rref.local_value(), 35) + + @dist_init + def test_local_value_not_on_owner(self): + # ensure that an error message is thrown if a user tries to call + # local_value() on a non-owning node. + next_rank = (self.rank + 1) % self.world_size + rref = rpc.remote( + worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + with self.assertRaisesRegex( + RuntimeError, ( + fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), " + fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), " + r"can't call localValue\(\) on user " + fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). 
" + fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)" + ) + ): + rref.local_value() + + @dist_init + def test_return_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + + rref_list = rpc.rpc_sync( + worker_name(dst_rank), get_rref_list, args=([1, 2, 3],) + ) + + for rref in rref_list: + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, 10), + ) + + rets = [ + rpc.rpc_sync( + rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref) + ) + for rref in rref_list + ] + + self.assertEqual(rets, [11, 12, 13]) + + @dist_init + def _test_rref_type(self, blocking): + + def launched_rpc(events): + expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner" + return any(e.name.startswith(expected_name) for e in events) + + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1)) + + with _profile() as p: + t = rref._get_type(blocking=blocking) + if not blocking: + t = t.wait() + + self.assertTrue(launched_rpc(p.function_events)) + expected_type = type(torch.ones(2)) + self.assertEqual(t, expected_type) + + futs = [] + + def verify(fut): + self.assertEqual(fut.value(), expected_type) + + with _profile() as p: + for _ in range(10): + t = rref._get_type(blocking=blocking) + if not blocking: + futs.append(t) + t.add_done_callback(verify) + t = t.wait() + self.assertEqual(t, expected_type) + + if not blocking: + # Note that cached calls with blocking=False all return the same + # cached original future. + first_fut = futs[0] + for f in futs[1:]: + self.assertTrue(f is first_fut) + # Ensure we never launch another RPC, other than for the very + # first call. + self.assertFalse(launched_rpc(p.function_events)) + self.assertEqual(t, type(torch.ones(2))) + + rref = rpc.remote(dst, MyClass, args=(0,)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_blocking(self): + self._test_rref_type(blocking=True) + + def test_rref_type_non_blocking(self): + self._test_rref_type(blocking=False) + + @dist_init + def _test_rref_type_with_error(self, blocking): + dst = worker_name((self.rank + 1) % self.world_size) + # 10 ms timeout + rref = rpc.remote(dst, raise_func) + # Blocking: error raised inline + if blocking: + with self.assertRaisesRegex(ValueError, "Expected error"): + rref._get_type(blocking=blocking) + else: + # Non-blocking: Immediately return future, block on wait + fut = rref._get_type(blocking=blocking) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + + + def test_rref_type_with_error_blocking(self): + self._test_rref_type_with_error(blocking=True) + + def test_rref_type_with_error_non_blocking(self): + self._test_rref_type_with_error(blocking=False) + + @dist_init + def _test_rref_type_owner(self, blocking): + rref = RRef(torch.ones(2) + 1) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, type(torch.ones(2))) + + rref = RRef(MyClass(0)) + rref_type = rref._get_type(blocking=blocking) + if not blocking: + rref_type = rref_type.wait() + self.assertEqual(rref_type, MyClass) + + def test_rref_type_owner_blocking(self): + self._test_rref_type_owner(blocking=True) + + def test_rref_type_owner_non_blocking(self): + self._test_rref_type_owner(blocking=False) + + @staticmethod + def _slow_add(x, y): + time.sleep(1) + return x + y + + 
@dist_init + def test_rref_type_slow_init(self): + dst = worker_name((self.rank + 1) % self.world_size) + rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1)) + self.assertEqual(rref._get_type(), type(torch.ones(2))) + + @dist_init + def test_owner_equality(self): + a = RRef(40) + b = RRef(50) + + other_rank = (self.rank + 1) % self.world_size + other_a = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_b = rpc.remote( + worker_name(other_rank), torch.add, args=(torch.ones(1), 1) + ) + other_a.to_here() # to ensure clean termination + other_b.to_here() + + self.assertNotEqual(a.owner(), 23) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertNotEqual(a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_a.owner()) + self.assertEqual(other_a.owner(), other_b.owner()) + self.assertEqual(a.owner(), a.owner()) + self.assertEqual(a.owner(), b.owner()) + self.assertEqual(a.owner(), rpc.get_worker_info()) + x = {} + x[a.owner()] = a + x[other_a.owner()] = other_a + self.assertEqual(x[a.owner()], a) + self.assertEqual(x[b.owner()], a) + self.assertEqual(x[other_a.owner()], other_a) + self.assertEqual(x[other_b.owner()], other_a) + self.assertEqual(len(x), 2) + + @dist_init + def test_pass_local_rrefs(self): + n = self.rank + 1 + dst_rank = n % self.world_size + dst_worker = worker_name(dst_rank) + + rref = RRef(40) + self.assertEqual( + rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90 + ) + self.assertEqual( + rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90 + ) + self.assertEqual( + rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90 + ) + + @dist_init + def test_remote_same_worker(self): + n = self.rank + 1 + dst_rank = n % self.world_size + rref_a = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2) + ) + rref_b = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1) + ) + rref_c = rpc.remote( + worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b) + ) + self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4) + + @dist_init(setup_rpc=True) + def test_call_method_on_rref(self): + """ + Tests that it is possible to call an instance method on a remote object + by using rref.owner() as destination of the call. + """ + vals = [10, 2, 5, 7] + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + + # creates a remote object + rref = rpc.remote(dst_worker, MyClass, args=(vals[0],)) + + # modifies state of the remote object + rpc.rpc_sync( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[1]), + ) + rpc.rpc_async( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[2]), + ).wait() + rpc.remote( + rref.owner(), + _call_method_on_rref, + args=(MyClass.increment_value, rref, vals[3]), + ).to_here() + + # queries state of the remote object + result = rpc.rpc_sync( + dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref) + ) + + self.assertEqual(result, sum(vals)) + + # Notice `rpc.api.shutdown()` accesses + # `_delete_all_user_and_unforked_owner_rrefs` through + # `torch.distributed.rpc.api`, so patching + # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will + # not help. 
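+    # NOTE: this follows the usual unittest.mock "patch where it is looked up"
+    # rule: the decorator below replaces the attribute on the `api` module,
+    # which is the namespace `shutdown()` resolves the helper from at call time.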
+ @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs") + def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + initialize_pg(self.file_init_method, self.rank, self.world_size) + # Wait for all init to complete. + dist.barrier() + + rref = rpc.remote( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.ones(2, 2), 1), + ) + + import torch.distributed.rpc.api as api + + if ignore_leak: + api._ignore_rref_leak = True + rpc.shutdown(graceful=True) + else: + api._ignore_rref_leak = False + with self.assertRaisesRegex(RuntimeError, "Leaking RRef"): + rpc.shutdown(graceful=True) + + @dist_init(setup_rpc=False) + def test_rref_leak(self): + self._test_rref_leak(ignore_leak=False) + + @dist_init(setup_rpc=False) + def test_ignore_rref_leak(self): + self._test_rref_leak(ignore_leak=True) + + @dist_init + def test_rref_str(self): + rref1 = RRef(self.rank) + id_class = "GloballyUniqueId" + self.assertEqual( + f"OwnerRRef({id_class}(created_on={self.rank}, local_id=0))", rref1.__str__() + ) + + dst_rank = (self.rank + 1) % self.world_size + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + self.assertEqual( + rref2.__str__(), + f"UserRRef(RRefId = {id_class}(created_on={self.rank}, local_id=1), " + f"ForkId = {id_class}(created_on={self.rank}, local_id=2))", + ) + + @dist_init + def test_rref_get_future(self): + # Tests that we can obtain the future corresponding to the creation of + # the RRef on remote end + if self.rank == 0: + # Builtin + rref = rpc.remote(worker_name(1), torch.add, args=(1, 1)) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # UDF + rref = rpc.remote(worker_name(1), foo_add, args=()) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + # Script + rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), )) + rref.to_here() + fut = rref._get_future() + self.assertIsInstance(fut, torch._C.Future) + + + @dist_init + def test_rref_context_debug_info(self): + # This test checks local states that are modified by remote workers. + # This means that we would need barrier before and after every check. + # The barrier before the check makes sure that all previous states are + # cleared globally, the barrier after ensures that no following states + # change gets into the current check. + initialize_pg(self.file_init_method, self.rank, self.world_size) + + # Check 1: local RRef does not update owners_ map or add a pending user. 
+ ################################################# + + rref1 = RRef(self.rank) + + # don't need a barrier here as local RRef is handled by this thread + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertIn("num_pending_users", info) + # RRef on local value is not added to context until shared across RPC + self.assertEqual(0, int(info["num_owner_rrefs"])) + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after the check 1 + dist.barrier() + + # Check 2: Sharing RRef as an arg should update owners_ map + ########################################################### + + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,)) + + # barrier before check 2 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(1, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + # barrier after check 2 + dist.barrier() + + # clear states for check 2 + rpc.rpc_sync(worker_name(dst_rank), clear_global_rref) + + # Wait for owner rref to be cleared. + while int(info["num_owner_rrefs"]) != 0: + info = _rref_context_get_debug_info() + time.sleep(0.1) + dist.barrier() + + # Check 3: rpc.remote call should update owners_ map + #################################################### + rref2 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref3 = rpc.remote( + worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1) + ) + rref2.to_here() + rref3.to_here() + + # barrier before check 3 + wait_until_pending_futures_and_users_flushed() + dist.barrier() + + info = _rref_context_get_debug_info() + self.assertIn("num_owner_rrefs", info) + self.assertEqual(2, int(info["num_owner_rrefs"])) + # no pending users since the fork is finished + self.assertEqual(0, int(info["num_pending_users"])) + + # barrier after check 3 + dist.barrier() + + @dist_init + def test_disable_gil_profiling(self): + # test that rpc.enable_gil_profiling(false) will result in + # GIL wait time not being recorded. + + # GIL profiling should be disabled by default. + dst_rank = (self.rank + 1) % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"]) + rpc.enable_gil_profiling(True) + rpc.rpc_sync( + worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1)) + ) + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertIn("agent.gil_average_wait_time_us", info) + + @dist_init(setup_rpc=False) + def test_local_shutdown(self): + # test that we can start RPC and then immediately locally shutdown + # without sending any messages. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + # pass in graceful=False to ensure that we don't wait for other workers. + rpc.shutdown(graceful=False) + + @dist_init + def test_debug_info(self): + # only test keys in this test case. 
Values should be covered by + # individual module debug info tests + import torch.distributed.autograd as dist_autograd + + info = _get_debug_info() + rref_info = _rref_context_get_debug_info() + agent_info = rpc.api._get_current_rpc_agent().get_debug_info() + autograd_info = dist_autograd._get_debug_info() + common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys() + self.assertEqual(0, len(common_keys)) + expected = {} + expected.update(rref_info) + expected.update(agent_info) + expected.update(autograd_info) + # NB: Key ordering is only preserved in python 3.6+. So here, we + # manually check keys are equal. + for key in expected.keys(): + self.assertIn(key, info.keys()) + + for key in info.keys(): + self.assertIn(key, expected.keys()) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + IS_MACOS, + "Test is flaky on MacOS since libuv error handling is not as robust as TCP", + ) + def test_handle_send_exceptions(self): + # test that if a callee node has gone down, we raise an appropriate + # exception instead of just crashing. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc._set_rpc_timeout(10) + # This barrier is needed to ensure that some workers do not exit before + # others have been brought up. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + if self.rank == 1: + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + # allow destination worker to exit without joining + error_str = self.get_shutdown_error_regex() + wait_until_node_failure(dst_rank, error_str) + fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3)) + # Shutdown sequence is not very well defined and as a result + # we can see any of the error messages defined in get_shutdown_error_regex. + with self.assertRaisesRegex(RuntimeError, error_str): + fut.wait() + # exit all workers non-gracefully. + rpc.shutdown(graceful=False) + + @dist_init + def test_deadlock(self): + # this test is copied from https://github.com/pytorch/pytorch/issues/45089 + if self.rank == 1: + dst1 = worker_name((self.rank + 1) % self.world_size) + x = torch.ones(2) + y = torch.ones(2) + rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait() + + dist_initialized = dist.is_initialized() + if not dist_initialized: + dist.init_process_group( + backend="gloo", + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size, + ) + + @dist_init(setup_rpc=False) + def test_local_shutdown_with_rpc(self): + # test that we can start RPC, send RPCs, and then run local shutdown. + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rpc.rpc_sync( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # A barrier is needed to ensure that all RPCs are processed. + # Otherwise, some RPCs can timeout since the receiving end + # has terminated. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + # pass in graceful=False to ensure that we don't wait for other workers. 
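+        # (As in test_local_shutdown above, graceful=False tears down the local
+        # agent without the cross-worker synchronization that a graceful
+        # shutdown would perform.)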
+ rpc.shutdown(graceful=False) + + @dist_init(setup_rpc=False) + def test_set_and_get_default_rpc_timeout(self): + timeout = 0.5 + + # A new `RpcBackendOptions` is constructed + # when accessing `self.rpc_backend_options`. + rpc_backend_options = self.rpc_backend_options + rpc_backend_options.rpc_timeout = timeout + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + set_timeout = rpc.get_rpc_timeout() + self.assertEqual(timeout, set_timeout) + rpc.shutdown() + + @dist_init + def test_default_timeout_used(self): + """ + Tests that if no timeout is passed into rpc_async and rpc_sync, then the + default timeout is used. + """ + dst_rank = (self.rank + 1) % self.world_size + rpc._set_rpc_timeout(0.001) # 1 ms + # futures should time out and be marked with an exception indicating it as such. + futs = [ + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()) + for _ in range(10) + ] + expected_error = self.get_timeout_error_regex() + for fut in futs: + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # ensure that if a new timeout is set old futures don't time out but new ones do. + rpc._set_rpc_timeout(200) # 200 seconds + # create a longstanding RPC. + fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + # now, set a short timeout. + rpc._set_rpc_timeout(0.001) + # fut2 should time out, fut1 should not. + fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut2.wait() + fut1.wait() + + # Zero timeout means infinity, so future should run to completion. + rpc._set_rpc_timeout(0) + rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait() + + # reset to default timeout so shutdown messages can process cleanly. + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + @dist_init + def test_rpc_timeouts(self): + # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803) + dst_rank = (self.rank + 1) % self.world_size + dst_worker = worker_name(dst_rank) + timeout = 0.1 # 100 ms + expected_error = self.get_timeout_error_regex() + # Test async UDF + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait() + + # Test sync UDF + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout) + + # Ensure run to completion if there is no timeout and we use the default + # RPC timeout. + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # If we set a default timeout for RPCs, it should be respected, though + # still overridden if we pass in a different timeout to the APIs. + rpc._set_rpc_timeout(0.001) + fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)) + with self.assertRaisesRegex(RuntimeError, expected_error): + fut.wait() + with self.assertRaisesRegex(RuntimeError, expected_error): + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,)) + + # The RPCs should run to completion since we override the timeout. 
+ rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5) + # Passing in a zero timeout should ensure that the RPC won't time out. + rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait() + rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0) + # Reset for clean shutdown + rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC) + + def test_dist_init_decorator(self): + @dist_init(setup_rpc=False) + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + @dist_init + def test_func(self): + return "expected result" + + self.assertEqual(test_func(self), "expected result") + + def test_use_rpc_pickler(self): + class TestPickler: + pass + + test_pickler = TestPickler() + with _use_rpc_pickler(test_pickler): + self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler) + self.assertTrue( + torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler + ) + + @dist_init + def test_wait_all(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1)) + self.assertTrue(len(_thread_local_var.future_list) == 1) + self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future)) + self.assertTrue(fut.done()) + self.assertEqual(fut.wait(), torch.ones(2, 2) + 1) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_multiple_call(self): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + for i in range(20): + fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1)) + res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1)) + self.assertEqual(res, torch.ones(i, i) + 1) + self.assertEqual(fut.wait(), torch.ones(i, i) + 1) + self.assertTrue(len(_thread_local_var.future_list) == 20) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_timeout(self): + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + timeout = 0.1 # 100 ms + fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_user_func(self): + with self.assertRaises(ValueError): + with _wait_all(): + self.assertTrue(_thread_local_var.future_list == []) + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_wait_all_raise_in_body(self): + with self.assertRaises(ValueError): + with _wait_all(): + raise_func() + self.assertFalse(hasattr(_thread_local_var, "future_list")) + + @dist_init + def test_custom_exception_throw_during_reconstruction(self): + """ + Test that we still throw info about the remote side exception even when + we cannot recreate it on client side. 
+ """ + initialize_pg(self.file_init_method, self.rank, self.world_size) + if self.rank != 0: + exc_caught = False + dst = worker_name(0) + try: + rpc.rpc_sync(dst, custom_raise_func, args=()) + except RuntimeError as e: + exc_caught = True + msg = str(e) + print(f"Got msg {msg}") + self.assertTrue("Original exception on remote side was" in msg) + self.assertTrue("CustomException" in msg) + except BaseException as e: + raise RuntimeError( + f"Failure - expected RuntimeError, got {e}" + ) from e + finally: + self.assertTrue(exc_caught) + + dist.barrier() + + + timed_out_rpc_event = None + + @staticmethod + def timed_out_rpc(): + RpcTest.timed_out_rpc_event.wait() + + @dist_init + def test_wait_all_exit_early_python(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func) + fut3 = rpc.rpc_async(dst, raise_func) + + # We should receive the error from fut2 + with self.assertRaisesRegex(ValueError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_builtin(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5))) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, "size of tensor"): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + @dist_init + def test_wait_all_exit_early_script_function(self): + # Initialize the event in the subprocess. + RpcTest.timed_out_rpc_event = Event() + + # Wait for all processes to initialize event. + initialize_pg(self.file_init_method, self.rank, self.world_size) + dist.barrier() + + dst = worker_name((self.rank + 1) % self.world_size) + fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc) + fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,)) + + # We should receive the error from fut2 + with self.assertRaisesRegex(RuntimeError, expected_err): + torch.futures.wait_all([fut1, fut2, fut3]) + + # Unblock RPC thread for fut1 + RpcTest.timed_out_rpc_event.set() + + + @dist_init + def test_function_not_on_callee(self): + # test that if a function does not exist on a callee, we don't crash, + # instead we get an AttributeError indicating that the func does not exist. + this_module = sys.modules[__name__] + caller_worker = "worker0" + callee_worker = "worker1" + + if self.rank == 1: + # Use delattr to remove the binding of a func on this nodes + delattr(this_module, "foo_add") + # notify remote end that we have removed it. + rpc.rpc_sync(caller_worker, set_value, args=(self.rank,)) + + if self.rank == 0: + # func exists on caller, but not callee. + # wait for remote end to remove the binding of foo_add func. 
+ wait_for_value_future() + # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error. + self.assertTrue(hasattr(this_module, "foo_add")) + with self.assertRaisesRegex( + RuntimeError, "RPC pickler does not serialize" + ): + rpc.rpc_sync(callee_worker, foo_add, args=()) + + @dist_init + def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self): + dst_worker_name = worker_name((self.rank + 1) % self.world_size) + + a = MyClass(1) + b = MyClass(2) + + # This is to make Python not garbage collect a and b. + a.other = b + b.other = a + + n = self.rank + a.rref = rpc.remote( + dst_worker_name, + torch.add, + args=(torch.ones(n, n), 2) + ) + + @dist_init(setup_rpc=False) + def test_use_rref_after_shutdown(self): + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + n = self.rank + 1 + dst_rank = n % self.world_size + rref = rpc.remote( + worker_name(dst_rank), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)), + ) + # pass in graceful=True to ensure that local UserRRefs are deleted. + rpc.shutdown(graceful=True) + + with self.assertRaisesRegex( + RuntimeError, "Cannot call to_here\\(\\) on it after deletion." + ): + rref.to_here() + + with self.assertRaisesRegex( + RuntimeError, "Cannot call fork an UserRRef after deletion." + ): + import torch.distributed.rpc.internal as internal + internal.serialize(rref) + + @staticmethod + def _return_gpu_tensor(): + return torch.rand(3, 3).cuda(0) + + @staticmethod + def _return_gpu_tensor_list(): + return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)] + + @staticmethod + def _gpu_tensor_list_arg(tensor_list): + return torch.rand(3, 3) + + def _create_rref(self): + owner_rank = (self.rank + 2) % self.world_size + return rpc.remote( + worker_name(owner_rank), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + + @dist_init + def test_user_rrefs_confirmed(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret = rpc.rpc_sync( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret, True) + + @dist_init + def test_user_rrefs_confirmed_remote(self): + dst_rank = (self.rank + 1) % self.world_size + rref = self._create_rref() + ret_rref = rpc.remote( + worker_name(dst_rank), + check_rref_confirmed, + args=(rref,) + ) + self.assertEqual(ret_rref.to_here(), True) + + @dist_init + def test_rref_py_pickle_not_supported(self): + local_rref = RRef(35) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"): + torch.save(local_rref, fname) + + @dist_init + def test_remote_throw(self): + rref = rpc.remote(worker_name((self.rank + 1) % self.world_size), + raise_or_inc, + args=(torch.ones(2),)) + with self.assertRaisesRegex(Exception, ".*Expected error.*"): + rref.to_here() + + @dist_init + def test_non_cont_tensors(self): + if self.rank == 0: + # Create a non-contiguous tensor. + t = torch.rand(5, 5) + t_view = t.narrow(1, 2, 2) + self.assertFalse(t_view.is_contiguous()) + t_cont = t_view.contiguous() + self.assertTrue(t_cont.is_contiguous()) + self.assertEqual(t_view, t_cont) + + # Send non-cont tensor over RPC. + next_rank = (self.rank + 1) % self.world_size + t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont)) + + # Verify the returned tensor. 
+ self.assertEqual(t_view, t_ret) + self.assertFalse(t_ret.is_contiguous()) + + @dist_init + def test_callback_simple(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + def callback(fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + set_by_cb.set_result(ret.clone() + 1) + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_wrong_arg_num(self): + set_by_cb = concurrent.futures.Future() + n = self.rank + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_fut = fut.then(my_function) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + with self.assertRaisesRegex( + RuntimeError, + "my\\_function\\(\\) missing 2 required positional arguments" + ): + cb_fut.wait() + + @dist_init + def test_callback_wrong_arg_type(self): + dst = worker_name((self.rank + 1) % self.world_size) + + fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1)) + fut1 = fut0.then(lambda x: x + 1) + + with self.assertRaisesRegex( + RuntimeError, + "unsupported operand type\\(s\\) for \\+" + ): + fut1.wait() + + @dist_init + def test_callback_multi(self): + num_cbs = 10 + n = self.rank + 1 + + def callback(idx, fut): + ret = fut.wait() + self.assertEqual(ret, torch.ones(n, n) * 2) + return ret + idx + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + cb_futs = [] + for idx in range(num_cbs): + cb_futs.append(fut.then(partial(callback, idx))) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + for idx in range(num_cbs): + self.assertEqual( + cb_futs[idx].wait(), + torch.ones(n, n) * 2 + idx + ) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_callback_chain(self): + n = self.rank + 1 + dst = worker_name(n % self.world_size) + + def callback(fut): + return fut.wait() + 1 + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), 1) + ) + + num_cbs = 20 + for _ in range(num_cbs): + fut = fut.then(callback) + + self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs) + + @dist_init + def test_callback_in_rpc(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + add_use_future_cb, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_callback_with_ret(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + fut2 = rpc.rpc_async( + dst, + torch.add, + args=(fut0.wait(), 1) + ).then(lambda fut1: fut1.wait() + 1) + + return fut2.wait() + + fut3 = rpc.rpc_async( + dst, + torch.add, + args=(torch.ones(2, 2), 1) + ).then(callback) + + self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3) + + @dist_init + def test_callback_with_error(self): + dst = worker_name((self.rank + 1) % self.world_size) + + def callback(fut0): + with self.assertRaisesRegex(ValueError, "Expected error"): + fut0.wait() + raise RuntimeError("Another expected error") + + fut1 = rpc.rpc_async(dst, raise_func).then(callback) + with self.assertRaisesRegex(RuntimeError, "Another expected error"): 
+ fut1.wait() + + @dist_init + def test_callback_none(self): + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + TypeError, + "incompatible function arguments." + ): + rpc.rpc_async(dst, raise_func).then(None) + + @dist_init + def test_add_done_callback(self): + set_by_cb = False + n = self.rank + 1 + + def callback(fut): + nonlocal set_by_cb + fut.wait() + set_by_cb = True + + fut = rpc.rpc_async( + worker_name(n % self.world_size), + torch.add, + args=(torch.ones(n, n), torch.ones(n, n)) + ) + + fut.add_done_callback(callback) + fut_then = fut.then(lambda _: True) + + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + # We have no guarantee that the add_done_callback fn will execute before the test finishes. + # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback + fut_then.wait() + self.assertTrue(set_by_cb) + self.assertEqual(fut.wait(), torch.ones(n, n) * 2) + + @dist_init + def test_mark_future_twice(self): + fut = rpc.rpc_async( + worker_name((self.rank + 1) % self.world_size), + torch.add, + args=(torch.zeros(2, 2), 1) + ) + self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1) + with self.assertRaisesRegex( + RuntimeError, + "Future can only be marked completed once" + ): + fut.set_result(1) + + @dist_init + def test_pickle_future(self): + fut = torch.futures.Future() + errMsg = "Can not pickle torch.futures.Future" + + dst = worker_name((self.rank + 1) % self.world_size) + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_sync(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.rpc_async(dst, fail_on_fut, args=(fut,)) + + with TemporaryFileName() as fname: + with self.assertRaisesRegex(RuntimeError, errMsg): + rpc.remote(dst, fail_on_fut, args=(fut,)) + + @dist_init + def test_future_done(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1)) + fut.wait() + self.assertTrue(fut.done()) + + @dist_init + def test_future_done_exception(self): + dst = worker_name((self.rank + 1) % self.world_size) + fut = rpc.rpc_async(dst, raise_func) + with self.assertRaisesRegex(ValueError, "Expected error"): + fut.wait() + self.assertTrue(fut.done()) + + def _test_future_cb(self, func): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync( + dst1, + func, + args=(dst2, torch.ones(2, 2), 1, 2) + ) + self.assertEqual(ret, torch.ones(2, 2) + 1 + 2) + + @dist_init + def test_future_in_rpc(self): + self._test_future_cb(add_use_future_set_result) + + @dist_init + def test_future_nested_callback(self): + self._test_future_cb(add_use_future_nested_cb) + + def _test_async_function_raise(self, mode): + with self.assertRaisesRegex(RuntimeError, "Expected error"): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_raise_func, + mode + ) + + @dist_init + def test_async_function_raise(self): + self._test_async_function_raise(RPCExecMode.SYNC) + + @dist_init + def test_async_function_raise_async(self): + self._test_async_function_raise(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_raise_remote(self): + self._test_async_function_raise(RPCExecMode.REMOTE) + + def _test_async_function_wrong_return_type(self, mode): + errMsg = ( + "Functions decorated with @rpc\\.async_function must return a " + "torch\\.futures\\.Future 
object," + ) + with self.assertRaisesRegex(RuntimeError, errMsg): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + async_wrong_type, + mode + ) + + @dist_init + def test_async_function_wrong_return_type(self): + self._test_async_function_wrong_return_type(RPCExecMode.SYNC) + + @dist_init + def test_async_function_wrong_return_type_async(self): + self._test_async_function_wrong_return_type(RPCExecMode.ASYNC) + + @dist_init + def test_async_function_wrong_return_type_remote(self): + self._test_async_function_wrong_return_type(RPCExecMode.REMOTE) + + @dist_init + def test_async_function_simple(self): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + def _test_async_function(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + args = (dst2, torch.ones(2, 2), 1, 2) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + 3) + + @dist_init + def test_async_function_with_future_ctor(self): + self._test_async_function(async_add_with_future_ctor) + + @dist_init + def test_async_function_with_future_ctor_remote(self): + self._test_async_function( + async_add_with_future_ctor, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_chained(self): + self._test_async_function(async_add_chained) + + @dist_init + def test_async_function_chained_remote(self): + self._test_async_function(async_add_chained, RPCExecMode.REMOTE) + + @dist_init + def test_async_function_nested(self): + self._test_async_function(async_add_nested) + + @dist_init + def test_async_function_nested_remote(self): + self._test_async_function(async_add_nested, RPCExecMode.REMOTE) + + @dist_init + def test_async_static_method(self): + self._test_async_function(AsyncExecutionClass.static_async_add) + + @dist_init + def test_async_static_method_remote(self): + self._test_async_function( + AsyncExecutionClass.static_async_add, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_class_method(self): + self._test_async_function(AsyncExecutionClass.class_async_add) + + @dist_init + def test_async_class_method_remote(self): + self._test_async_function( + AsyncExecutionClass.class_async_add, + RPCExecMode.REMOTE + ) + + def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + rref = rpc.remote(dst1, AsyncExecutionClass) + + x = torch.ones(2, 2) + y = torch.ones(2, 2) + 1 + if mode == RPCExecMode.SYNC: + ret = rref.rpc_sync().static_async_add(dst2, x, x, y) + ret += rref.rpc_sync().class_async_add(dst2, x, x, y) + ret += rref.rpc_sync().bound_async_add(dst2, x, x, y) + elif mode == RPCExecMode.ASYNC: + ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait() + ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait() + elif mode == RPCExecMode.REMOTE: + ret = rref.remote().static_async_add(dst2, x, x, y).to_here() + ret += rref.remote().class_async_add(dst2, x, x, y).to_here() + ret += rref.remote().bound_async_add(dst2, x, x, y).to_here() + + self.assertEqual(ret, 3 * 4 * x) + + @dist_init + def test_async_class_rref_proxy(self): + self._test_test_async_class_rref_proxy() + + @dist_init 
+ def test_async_class_rref_proxy_async(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC) + + @dist_init + def test_async_class_rref_proxy_remote(self): + self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE) + + def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC): + dst1 = worker_name((self.rank + 1) % self.world_size) + dst2 = worker_name((self.rank + 2) % self.world_size) + + num = 20 + step = 3 + args = (dst2, torch.ones(2, 2), num, step) + ret = self._run_func_in_mode(dst1, fn, mode, args=args) + self.assertEqual(ret, torch.ones(2, 2) + num * step) + + @dist_init + def test_async_function_multi_chained(self): + self._test_async_function_multi(async_add_chained_multi) + + @dist_init + def test_async_function_multi_chained_async(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_chained_remote(self): + self._test_async_function_multi( + async_add_chained_multi, + RPCExecMode.REMOTE + ) + + @dist_init + def test_async_function_multi_fanout(self): + self._test_async_function_multi(async_add_multi_fanout) + + @dist_init + def test_async_function_multi_fanout_async(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.ASYNC + ) + + @dist_init + def test_async_function_multi_fanout_remote(self): + self._test_async_function_multi( + async_add_multi_fanout, + RPCExecMode.REMOTE + ) + + def _test_return_future(self, mode): + with self.assertRaisesRegex( + RuntimeError, + "Can not pickle torch.futures.Future" + ): + self._run_func_in_mode( + worker_name((self.rank + 1) % self.world_size), + return_future, + mode + ) + + @dist_init + def test_return_future(self): + self._test_return_future(RPCExecMode.SYNC) + + @dist_init + def test_return_future_async(self): + self._test_return_future(RPCExecMode.ASYNC) + + @dist_init + def test_return_future_remote(self): + self._test_return_future(RPCExecMode.REMOTE) + + @dist_init + def test_rref_timeout(self): + # This test is similar to ones in FaultyProcessGroupTest, but is meant to be + # run with other backends besides ProcessGroup. + if self.rank != 0: + return + + dst_rank = (self.rank + 1) % self.world_size + dst_worker = f"worker{dst_rank}" + # 10 ms timeout + rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01) + # Future corresponding to the remote creation should time out. + expected_error = self.get_timeout_error_regex() + with self.assertRaisesRegex(RuntimeError, expected_error): + rref._get_future().wait() + # Call to ensure pending callbacks are run. + wait_until_pending_futures_and_users_flushed() + with self.assertRaisesRegex(RuntimeError, "RRef creation"): + rref.to_here() + + wait_until_owners_and_forks_on_rank(1, 1, rank=1) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_pg_then_rpc(self): + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + # Test RPC. 
+ next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614." + ) + def test_init_rpc_then_pg(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.init_process_group( + backend="gloo", + init_method=self.init_method, + rank=self.rank, + world_size=self.world_size, + ) + + # Test RPC. + next_rank = (self.rank + 1) % self.world_size + ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1)) + self.assertEqual(ret, torch.ones(2, 2) + 1) + + # Test PG + dist.barrier() + + rpc.shutdown() + + @dist_init + def test_wait_all_with_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init + def test_wait_all_with_partial_exception(self): + futs = [] + dst = worker_name((self.rank + 1) % self.world_size) + for _ in range(10): + futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1))) + + futs.append(rpc.rpc_async(dst, raise_func)) + + with self.assertRaisesRegex(ValueError, "Expected error"): + ret = torch.futures.wait_all(futs) + + @dist_init(setup_rpc=False) + @skip_but_pass_in_sandcastle_if( + os.environ.get("RPC_INIT_WITH_TCP", None) == "1", + "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491", + ) + def test_init_rpc_twice(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Wait for all init to complete. + dist.barrier() + + # Use a different file name for the next initialization + new_backend_options = self.rpc_backend_options + new_backend_options.init_method += "init_2" + + # Ensure rpc initialization works again. + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=new_backend_options, + ) + + # Verify RPCs work after re-init. 
+ dst = worker_name((self.rank + 1) % self.world_size) + rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1)) + rpc.rpc_sync(dst, foo_add, args=()) + + rpc.shutdown() + + def test_wrong_types(self): + with self.assertRaisesRegex( + TypeError, + "Argument backend must be a member of BackendType", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend="TENSORPIPE", + ) + + with self.assertRaisesRegex( + TypeError, + "Argument rpc_backend_options must be an instance of RpcBackendOptions", + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=self.rpc_backend, + rpc_backend_options={"init_method": self.init_method} + ) + + def test_cannot_infer_backend_from_options(self): + # An exception should be raised if the backend isn't specified but + # options are given which are not an instance of any of the known + # agents' option classes. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex(TypeError, "Could not infer backend for options"): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + @dist_init + def test_owner_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t1 = torch.rand(10, 10, requires_grad=True) + rref = rpc.RRef(t1.sum() + t1.sum()) + rref.backward() + expected_grad = torch.ones_like(t1) * 2 + self.assertEqual(expected_grad, t1.grad) + + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id) + self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1]) + + # Double backward. + with dist_autograd.context() as context_id: + t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1)) + rref = rpc.RRef(t2.sum()) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1]) + + # Test errors. 
+ with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"): + rpc.RRef(torch.rand(10)).backward() + + with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"): + rpc.RRef(torch.rand(10, requires_grad=True)).backward() + + with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"): + rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100) + + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rpc.RRef("foo").backward() + + @staticmethod + def _sum(x): + return x.sum() + + @staticmethod + def _identity(x): + return x + + @dist_init + def test_user_rref_backward(self): + dst = worker_name((self.rank + 1) % self.world_size) + t = torch.rand(10, requires_grad=True) + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._sum, args=(t,)) + rref.backward(context_id, retain_graph=True) + rref.backward(context_id) + self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t]) + + with dist_autograd.context() as context_id: + rref = rpc.remote(dst, RpcTest._identity, args=("foo",)) + with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"): + rref.backward(context_id) + + with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"): + rref.backward() + + @dist_init(setup_rpc=False) + def test_shutdown_errors(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options, + ) + + if self.rank != 0: + og_func = rpc.api._broadcast_to_followers + og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs + + # Monkey-patch _broadcast_to_followers to fail, which would ensure + # _all_gather on leader raises an exception. + def raise_error(sequence_id, objects_map): + og_func(sequence_id, objects_map) + raise RuntimeError('simulation') + + # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail, + # which would ensure barrier is not called on followers. 
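+            # (Both originals are restored in the `finally` block below, so
+            # later tests in this process see the real implementations again.)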
+ def rref_error(): + raise RuntimeError('simulation rref') + + try: + rpc.api._broadcast_to_followers = raise_error + rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error + with self.assertRaisesRegex(RuntimeError, 'simulation rref'): + rpc.shutdown() + finally: + rpc.api._broadcast_to_followers = og_func + rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func + else: + with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'): + rpc.shutdown() + + dist.barrier() + + @dist_init + def test_my_parameter_server(self): + self._my_parameter_server(False) + + +class CudaRpcTest(RpcAgentTestFixture): + + @skip_if_lt_x_gpu(2) + @dist_init + def test_profiler_remote_cuda(self): + if self.rank != 1: + return + + dst_cuda_0 = (self.rank + 1) % self.world_size + dst_cuda_1 = (self.rank + 2) % self.world_size + dst_worker_cuda_0 = worker_name(dst_cuda_0) + dst_worker_cuda_1 = worker_name(dst_cuda_1) + + with _profile(use_cuda=True) as p: + fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, )) + fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, )) + fut1.wait() + fut2.wait() + + def get_name(event): + return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):] + + function_events = p.function_events + for event in function_events: + if event.is_async: + self.assertEqual(0, event.device_time_total) + self.assertEqual([], event.kernels) + self.assertEqual(0, event.device_time) + else: + if event.node_id == 1: + continue + self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) + if get_name(event) in EXPECTED_REMOTE_EVENTS: + self.assertGreater(event.device_time_total, 0) + self.assertEqual(1, len(event.kernels)) + kernel = event.kernels[0] + if event.node_id == dst_cuda_0: + self.assertEqual(kernel.device, 0) + if event.node_id == dst_cuda_1: + self.assertEqual(kernel.device, 1) + self.assertGreater(event.device_time, 0) + + # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled + # events. + remote_events = [event for event in function_events if event.is_remote] + remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS] + self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS)) + + +class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def test_mismatched_type_for_options(self): + # An exception should be raised if the options are not an instance of + # TensorPipeRpcBackendOptions. + rpc_backend_options = FooBackendOptions(self.init_method) + + with self.assertRaisesRegex( + TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`" + ): + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + backend=rpc.BackendType.TENSORPIPE, + rpc_backend_options=rpc_backend_options, + ) + + def test_infer_backend_from_options(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.init_method, + _transports=tp_transports() + ) + + rpc.init_rpc( + name=worker_name(self.rank), + rank=self.rank, + world_size=self.world_size, + # Do _not_ pass backend. + rpc_backend_options=rpc_backend_options, + ) + + self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent) + + # FIXME Merge this test with the corresponding one in RpcTest. 
+ @dist_init(setup_rpc=False) + def test_set_and_get_num_worker_threads(self): + NUM_THREADS = 27 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=NUM_THREADS, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + info = rpc.api._get_current_rpc_agent().get_debug_info() + self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_set_default_timeout(self): + # Set a high timeout since it doesn't affect test runtime and ensures + # the test doesn't erroneously timeout due to slow machines. + timeout = 100 + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + _transports=tp_transports(), + ) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + default_timeout = rpc.get_rpc_timeout() + self.assertEqual(default_timeout, timeout) + rpc.shutdown() + + # FIXME Merge this test with the corresponding one in RpcTest. + @dist_init(setup_rpc=False) + def test_tensorpipe_options_throw_on_timedelta_timeout(self): + from datetime import timedelta + + timeout = timedelta() + # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails + with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + rpc_timeout=timeout, + ) + + @dist_init + def _test_rref_get_type_timeout(self, blocking): + # Test where we try to get the type of a RRef from an owner, but RRef + # creation is slower than timeout passed into _get_type. + dst_rank = (self.rank + 1) % self.world_size + dst = worker_name(dst_rank) + slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True)) + timeout = 0.5 + expected_err = self.get_timeout_error_regex() + # Blocking: blocks on inline call + if blocking: + with self.assertRaisesRegex(RuntimeError, expected_err): + slow_rref._get_type(timeout=timeout, blocking=blocking) + # Non-blocking: blocks on wait + else: + fut = slow_rref._get_type(timeout=timeout, blocking=blocking) + with self.assertRaisesRegex(RuntimeError, expected_err): + fut.wait() + + # FIXME We wait until the remote completed creating the OwnerRRef + # because there's currently a race if we shut down RPC before that. 
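+        # to_here() blocks until the owner has finished constructing the value,
+        # which is a simple way to wait out that race before shutting down.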
+        slow_rref.to_here()
+
+    def test_rref_get_type_timeout_blocking(self):
+        self._test_rref_get_type_timeout(blocking=True)
+
+    def test_rref_get_type_timeout_non_blocking(self):
+        self._test_rref_get_type_timeout(blocking=False)
+
+    @dist_init
+    def test_op_with_invalid_args(self):
+        dst = worker_name((self.rank + 1) % self.world_size)
+        with self.assertRaisesRegex(
+            RuntimeError, "Overloaded torch operator invoked from Python failed to match any schema"
+        ):
+            rpc.rpc_sync(dst, torch.add, args=())
+
+    def _test_rref_proxy_timeout(self, rref_proxy_api):
+        dst_rank = (self.rank + 1) % self.world_size
+        dst = worker_name(dst_rank)
+        rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
+        # Ensure RRef is created on remote node.
+        rref.to_here()
+        rref_api = getattr(rref, rref_proxy_api)
+        self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
+        expected_error = self.get_timeout_error_regex()
+        timeout = 2
+        with self.assertRaisesRegex(RuntimeError, expected_error):
+            result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
+            if rref_api == rref.rpc_async:
+                result.wait()
+            elif rref_api == rref.remote:
+                result._get_future().wait()
+
+        # Case where rpc.remote() is stuck and exceeds the timeout.
+        slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
+        timeout = 0.01
+        rref_api = getattr(slow_rref, rref_proxy_api)
+        # Note that even when we call rref.rpc_async() in this case, we time out
+        # during future creation rather than while waiting on the future. This is
+        # because the rref proxy function calls rref._get_type before returning
+        # the future, which blocks until the RRef has been created on the owner
+        # node or the specified timeout elapses.
+        with self.assertRaisesRegex(RuntimeError, expected_error):
+            result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
+            # rpc_async returns immediately and surfaces the timeout through wait().
+            if rref_api == slow_rref.rpc_async:
+                result.wait()
+
+        # FIXME We wait until the remote completed creating the OwnerRRef
+        # because there's currently a race if we shut down RPC before that.
+ slow_rref.to_here() + + @dist_init + def test_rref_proxy_timeout(self): + for rpc_api in ["rpc_sync", "rpc_async", "remote"]: + self._test_rref_proxy_timeout(rpc_api) + + @dist_init + def test_send_to_rank_sparse(self): + dst_rank = (self.rank + 1) % self.world_size + + # Test sparse tensor + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor() + y = build_sparse_tensor() + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]: + x = build_sparse_tensor(coalesce=True) + y = build_sparse_tensor(coalesce=True) + expected_tensor = (x + y) + ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y)) + self.assertEqual(expected_tensor, ret) + + @dist_init + def test_self_py_udf_remote_sparse(self): + self._self_py_udf_remote( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_rpc_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_rpc_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_rpc_arg_sparse(self): + self._self_remote_rref_as_rpc_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_remote_arg_sparse(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._self_remote_rref_as_remote_arg( + dst, + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_self_remote_rref_as_self_remote_arg_sparse(self): + self._self_remote_rref_as_remote_arg( + rpc.get_worker_info(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() + ) + + def test_world_size_one_sparse(self): + self._world_size_one( + build_sparse_tensor(), + build_sparse_tensor() + ) + + @dist_init + def test_multi_rpc_sparse(self): + self._multi_rpc(True) + + def test_wait_all_workers_sparse(self): + self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor()) + + def test_wait_all_workers_twice_sparse(self): + self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor()) + + @dist_init + def test_py_sparse_tensors_in_container(self): + n = self.rank + 1 + dst_rank = n % self.world_size + a = [build_sparse_tensor(), build_sparse_tensor()] + ret = rpc.rpc_sync( + worker_name(dst_rank), my_container_sum, args=(a,) + ) + self.assertEqual(ret, my_container_sum(a)) + + @dist_init + def test_nested_rpc_sparse(self): + self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2) + + @dist_init + def test_stress_heavy_rpc_sparse(self): + self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),)) + + @dist_init + def test_builtin_remote_ret_sparse(self): + self._builtin_remote_ret( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_builtin_remote_self_sparse(self): + self._builtin_remote_self( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 2 + ) + + @dist_init + def test_multi_builtin_remote_ret_sparse(self): + self._test_multi_remote_call( + torch.add, True, + args_fn=RpcTest._multi_args_fn + ) + + @dist_init + def test_multi_py_udf_remote_sparse(self): + 
self._test_multi_remote_call( + my_function, + True, + kwargs_fn=RpcTest._multi_kwargs_fn + ) + + @dist_init + def test_py_rref_args_sparse(self): + self._py_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 4 + ) + + @dist_init + def test_py_rref_args_user_share_sparse(self): + self._py_rref_args_user_share( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_py_rpc_rref_args_sparse(self): + self._py_rpc_rref_args( + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor(), + build_sparse_tensor() * 6 + ) + + @dist_init + def test_nested_remote_sparse(self): + self._nested_remote( + nested_remote_sparse, + build_sparse_tensor() + build_sparse_tensor() + ) + + @dist_init + def test_nested_rref_sparse(self): + self._nested_rref( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_nested_rref_stress_sparse(self): + self._nested_rref_stress( + nested_rref_sparse, + build_sparse_tensor() * 2, + build_sparse_tensor() * 2 + ) + + @dist_init + def test_my_parameter_server_sparse(self): + self._my_parameter_server(True) + + # Test init_rpc without world_size argument + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc(self): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + rpc.shutdown() + + # Dynamic RPC new ranks communicate with existing ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_new_rank_can_communicated_with_existing_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + if self.rank == 0: + for i in range(1, self.world_size): + 
result = rpc.rpc_sync(worker_name(i), torch.add, args=(torch.tensor(1), torch.tensor(1))) + self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + # Dynamic RPC existing ranks can communicate with new ranks using CUDA rpc + @skip_if_lt_x_gpu(2) + @dist_init(setup_rpc=False) + def test_dynamic_rpc_existing_rank_can_communicate_with_new_rank_cuda(self): + initialize_pg(self.file_init_method, self.rank, self.world_size) + + if self.rank == 0: + options = self.rpc_backend_options + for i in range(1, self.world_size): + dst = worker_name(i) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 1}) + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=options, + ) + + # Rank 0 will be initialized with RPC after this barrier + dist.barrier() + + # Rest of ranks join after barrier + if self.rank != 0: + # Newly joined ranks will be able to communicate with rank 0, since that was created first + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + + # TODO: Cuda RPC is failing due to: + # terminate called after throwing an instance of 'c10::Error' + # what(): 0 <= device && static_cast(device) < device_allocator.size() + # INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":1937, + # please report a bug to PyTorch. Allocator not initialized for device 1: did you call init? + # dist.barrier() + # if self.rank == 0: + # for i in range(1, self.world_size): + # x = torch.ones(2) + # result_on_device_0 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(0), 1)) + # result_on_device_1 = rpc.rpc_sync(worker_name(i), torch.add, args=(x.to(1), 1)) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_0) + # self.assertEqual(torch.device('cuda:0'), result_on_device_0.device) + # self.assertEqual(torch.add(torch.ones(2), 1), result_on_device_1) + # self.assertEqual(torch.device('cuda:1'), result_on_device_1.device) + + # Barrier to ensure that all rpc_sync calls are finished + dist.barrier() + rpc.shutdown() + + @dist_init(setup_rpc=False) + def test_dynamic_rpc_init_rpc_without_rank(self): + # default initialization uses file init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=self.rpc_backend_options, + ) + + # env init + with self.assertRaisesRegex(ValueError, "environment variable RANK expected"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + # tcp init + with self.assertRaisesRegex(ValueError, "rank parameter missing"): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456") + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rpc_backend_options=rpc_backend_options, + ) + + @dist_init(setup_rpc=False) + def test_dynamic_and_static_init_rpc_together(self): + # Initialize a static rpc group with size = self.world_size - 1 + dist.init_process_group( + backend='gloo', + init_method=self.file_init_method, + rank=self.rank, + world_size=self.world_size) + + world_size_minus_one = self.world_size - 1 + if self.rank < world_size_minus_one: + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=world_size_minus_one, + rpc_backend_options=self.rpc_backend_options, + ) + + dist.barrier() + + # Attempt to add an additional dynamic group member + if self.rank == world_size_minus_one: + # Expect error message to be thrown + with self.assertRaisesRegex(RuntimeError, "RPC group mixes statically and dynamically\ + initialized members which is not supported."): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + rpc_backend_options=self.rpc_backend_options, + ) + +class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon): + + def _test_device_maps(self, options, errMsg): + with self.assertRaisesRegex(ValueError, errMsg): + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + self.assertFalse(rpc.api._is_current_rpc_agent_set()) + + @skip_if_lt_x_gpu(2) + def test_device_maps_wrong_worker_name(self): + options = self.rpc_backend_options + options.set_device_map("none_exist", {0: 1}) + + self._test_device_maps( + options, + errMsg="Node worker0 has invalid target node names in its device maps" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_local_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {torch.cuda.device_count(): 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has source devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_max_remote_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: torch.cuda.device_count()}) + + self._test_device_maps( + options, + errMsg="Node worker0 has target devices with invalid indices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_many_to_one(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + options.set_device_map(dst, {0: 0}) + + self._test_device_maps( + options, + errMsg="Node worker0 has duplicated target devices in its device map for worker1" + ) + + @skip_if_lt_x_gpu(2) + def test_device_maps_one_to_many(self): + if self.rank == 0: + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1}) + with self.assertRaisesRegex( + ValueError, "`set_device_map` only supports 1-to-1 mapping" + ): + options.set_device_map(dst, {0: 0}) + + @skip_if_lt_x_gpu(1) + def test_device_maps_invalid_min_device(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {-1: 0}) + + with self.assertRaisesRegex( + RuntimeError, "Device index must not be negative" + ): + options.set_device_map(dst, {0: -1}) + + @staticmethod + def _gpu_add(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]): + return (x + y).to(0) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_gpu(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {0: 1, 1: 0}) + + rpc.init_rpc( + 
name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add, + args=(torch.zeros(2).to(0), torch.ones(2).to(0)) + ) + self.assertEqual(ret.device, torch.device(1)) + self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1)) + rpc.shutdown() + + @staticmethod + def _gpu_add_given_devices(x, y, x_to, y_to, z_to): + x_device = "cpu" if x.device.type == "cpu" else x.device.index + y_device = "cpu" if y.device.type == "cpu" else y.device.index + if x_device == x_to and y_device == y_to: + return x.to(z_to) + y.to(z_to) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None): + fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn + x_to = device_map[x_from] + y_to = device_map[y_from] + + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(x_from) + y = torch.ones(2).to(y_from) + + ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to)) + + reverse_device_map = {device_map[k] : k for k in device_map} + z_from = reverse_device_map[z_to] + + ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index + self.assertEqual(ret_device, z_from) + self.assertEqual(ret, torch.ones(2).to(z_from)) + + rpc.shutdown() + + def test_device_map_cpu(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to="cpu", + device_map={"cpu" : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_cpu_to_gpu_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=0, + device_map={"cpu" : 0}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_cpu_to_gpu_non_default(self): + self._test_device_maps_gpu( + x_from="cpu", + y_from="cpu", + z_to=1, + device_map={"cpu" : 1}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(1) + def test_device_map_gpu_to_cpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to="cpu", + device_map={0 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_to_cpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to="cpu", + device_map={1 : "cpu"}, + fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices, + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=0, + device_map={0 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=1, + device_map={1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_default_to_non_default(self): + self._test_device_maps_gpu( + x_from=0, + y_from=0, + z_to=1, + device_map={0 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_non_default_to_default(self): + self._test_device_maps_gpu( + x_from=1, + y_from=1, + z_to=0, + device_map={1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_1(self): + self._test_device_maps_gpu( 
+ x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0} + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_1(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_2(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_3(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_4(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 0, 1 : 1}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_5(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_6(self): + self._test_device_maps_gpu( + x_from=0, + y_from=1, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_7(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=0, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @skip_if_lt_x_gpu(2) + def test_device_map_gpu_mixed_self_8(self): + self._test_device_maps_gpu( + x_from=1, + y_from=0, + z_to=1, + device_map={0 : 1, 1 : 0}, + dst=worker_name(self.rank) + ) + + @staticmethod + def _gpu_add_multi_gpu(x, y): + if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]): + return x.to(0) + y, x - y.to(1) + else: + raise ValueError("Wrong device affinity") + + def _test_device_maps_multi_gpu(self, dst): + options = self.rpc_backend_options + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(1) + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(x, y) + ) + + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, 
torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_multi_gpu(dst) + + @skip_if_lt_x_gpu(2) + def test_device_maps_multi_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_multi_gpu(dst) + + @staticmethod + def _gpu_add_return_to_gpu(x, y): + if x.device.type == 'cpu' and y.device.type == 'cpu': + return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3) + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(2) + def test_device_maps_in_options(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc.TensorPipeRpcBackendOptions( + init_method=options.init_method, + num_worker_threads=options.num_worker_threads, + device_maps={dst: {0: 1, 1: 0}}, + _transports=tp_transports() + ) + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu, + args=(torch.zeros(2).to(0), torch.ones(2).to(1)) + ) + self.assertEqual(rets[0].device, torch.device(1)) + self.assertEqual(rets[1].device, torch.device(0)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + rpc.shutdown() + + def _test_device_maps_return_to_gpu(self, dst): + options = self.rpc_backend_options + + options.set_device_map(dst, {0: 1}) + options.set_device_map(dst, {1: 2}) + options.set_device_map(dst, {2: 3}) + options.set_device_map(dst, {3: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu, + args=(torch.zeros(2), torch.ones(2)) + ) + for i in range(len(rets)): + self.assertEqual(rets[i].device, torch.device((3 + i) % 4)) + self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3)) + self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0)) + self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1)) + self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2)) + rpc.shutdown() + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu(self): + dst = worker_name((self.rank + 1) % self.world_size) + self._test_device_maps_return_to_gpu(dst) + + @skip_if_lt_x_gpu(4) + def test_device_maps_return_to_gpu_self(self): + dst = worker_name(self.rank) + self._test_device_maps_return_to_gpu(dst) + + @staticmethod + def _add_to_gpu(x, y): + return (x + y).to(0) + + def _test_device_maps_missing_config(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = ( + "TensorPipe RPC backend only supports CPU tensors by default.*" + "`set_device_map` on `TensorPipeRpcBackendOptions`" + ) + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1)) + elif mode == RPCExecMode.REMOTE: + rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + 
self.assertEqual(ret, torch.ones(2) + 1) + + def _test_device_maps_missing_config_response(self, mode): + dst = worker_name((self.rank + 1) % self.world_size) + errMsg = "Response device mapping is not available" + + with self.assertRaisesRegex(RuntimeError, errMsg): + if mode == RPCExecMode.SYNC: + rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + elif mode == RPCExecMode.REMOTE: + rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ).to_here() + else: + raise ValueError(f"unexpected mode {mode}") + + # make sure RPC is still functioning + ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1)) + self.assertEqual(ret, torch.ones(2) + 1) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config(self): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + def test_device_maps_missing_config_not_timeout(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=self.rpc_backend_options + ) + + timeout = rpc.get_rpc_timeout() + + tik = time.time() + self._test_device_maps_missing_config(RPCExecMode.SYNC) + rpc.shutdown() + tok = time.time() + + self.assertTrue(tok - tik < timeout) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_response_loop(self): + for _ in range(self.rpc_backend_options.num_worker_threads + 5): + self._test_device_maps_missing_config_response(RPCExecMode.SYNC) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote(self): + self._test_device_maps_missing_config(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(1) + @dist_init + def test_device_maps_missing_config_remote_response(self): + self._test_device_maps_missing_config_response(RPCExecMode.REMOTE) + + @skip_if_lt_x_gpu(2) + def test_device_maps_remote(self): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, {1: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote( + dst, + TensorPipeAgentCudaRpcTest._add_to_gpu, + args=(torch.zeros(2), 1) + ) + + self.assertEqual(rref.to_here().device.index, 1) + self.assertEqual(rref.to_here(), torch.ones(2).to(1)) + + rpc.shutdown() + + @staticmethod + def _slow_add_on_user_stream(x, y): + s0 = torch.cuda.current_stream(x.device) + s1 = torch.cuda.Stream(device=x.device) + s1.wait_stream(s0) + x.record_stream(s1) + y.record_stream(s1) + with torch.cuda.stream(s1): + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + z = x + y + s0.wait_stream(s1) + z.record_stream(s0) + return z + + def _test_custom_stream(self, fn, device_map): + options = self.rpc_backend_options + dst = worker_name((self.rank + 1) % self.world_size) + options.set_device_map(dst, device_map) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + 
world_size=self.world_size, + rpc_backend_options=options, + ) + + fn(dst) + + rpc.shutdown() + + def _test_stream_sync(self, dst): + x = torch.ones(2, 2).to(0) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + self.assertEqual(ret, 2 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream(self): + self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"}) + + def _test_stream_multi_async(self, dst): + futs = [] + for i in range(20): + x = torch.ones(2, 2).to(0) * i + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, x) + ) + ) + + for i in range(20): + self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_multi(self): + self._test_custom_stream( + self._test_stream_multi_async, + {"cuda:0": "cuda:1"} + ) + + @staticmethod + def _nested_slow_add_on_user_stream(dst, x, y, z): + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._slow_add_on_user_stream, + args=(x, y) + ) + + return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z) + + def _test_stream_nested_sync(self, dst): + x = torch.ones(2, 2).to(0) + y = torch.ones(2, 2).to(0) * 2 + z = torch.ones(2, 2).to(0) * 3 + nested_dst = worker_name((self.rank + 2) % self.world_size) + ret = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + self.assertEqual(ret, 6 * x) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested(self): + self._test_custom_stream( + self._test_stream_nested_sync, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + def _test_stream_nested_multi_async(self, dst): + if self.rank == 0: + futs = [] + n = 5 + xs, ys, zs = [], [], [] + for i in range(n): + x = torch.ones(2, 2).to(0) * (i - 1) + y = torch.ones(2, 2).to(0) * i + z = torch.ones(2, 2).to(0) * (i + 1) + xs.append(x) + ys.append(y) + zs.append(z) + nested_dst = worker_name((self.rank + 2) % self.world_size) + futs.append( + rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream, + args=(nested_dst, x, y, z) + ) + ) + + for i in range(n): + self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i]) + + @skip_if_lt_x_gpu(2) + def test_custom_stream_nested_multi(self): + self._test_custom_stream( + self._test_stream_nested_multi_async, + {"cuda:0": "cuda:1", "cuda:1": "cuda:0"} + ) + + @staticmethod + def _gpu_add_wrong_gpus(x, y): + if x.is_cuda and y.is_cuda: + return x.cpu() + y.cuda() + else: + raise ValueError("Wrong device affinity") + + @skip_if_lt_x_gpu(1) + def test_device_mismatch(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0: 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + x = torch.zeros(2).to(0) + y = torch.ones(2).to(0) + + with self.assertRaisesRegex( + RuntimeError, + "Expected all tensors to be on the same device, but found at least two devices" + ): + rets = rpc.rpc_sync( + dst, + TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus, + args=(x, y) + ) + + rpc.shutdown() + + def _test_rref_synchronization(self, local_device, remote_device): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device : remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + 
rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. + # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + x = torch.randn(200, 1, 28, 28).to(local_device) + actual = rref.remote().forward(x).to_here() + expected = rref.rpc_sync().forward(x) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_to_here_synchronization1(self): + self._test_rref_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization2(self): + self._test_rref_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization3(self): + self._test_rref_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_to_here_synchronization4(self): + self._test_rref_synchronization("cuda:0", "cuda:1") + + def _test_rref_as_arg_synchronization( + self, + local_device, + remote_device, + devicesOptions=None + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {local_device: remote_device}) + + input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size) + options.set_device_map(input_src, {remote_device: local_device}) + + if devicesOptions is not None: + options.set_devices(devicesOptions[self.rank]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 1: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + actual = rref.remote().forward(rref_x, True).to_here() + expected = rref.rpc_sync().forward(rref_x, True) + self.assertEqual(actual, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization1(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization2(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization3(self): + self._test_rref_as_arg_synchronization("cuda:1", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_as_arg_synchronization4(self): + self._test_rref_as_arg_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(1) + def test_rref_as_arg_synchronization5(self): + self._test_rref_as_arg_synchronization( + "cuda:0", + "cuda:0", + [["cuda:0"] for _ in range(4)], # devicesOptions + ) + + @staticmethod + def _rref_relay(rref): + return rref.to_here() + + def _test_rref_forward_synchronization(self, local_device, remote_device): + options = self.rpc_backend_options + + input_src = worker_name(0) + model_dst = worker_name(1) + out_relay = worker_name(2) + + if self.rank == 0: + # for 1) model construction 2) forward execution + options.set_device_map(model_dst, {local_device: remote_device}) + + # Forward output will be first copied to the relay node before + # returning to the worker. This is intentional, to test RRef + # forward CUDA stream synchronizations. + options.set_device_map(out_relay, {local_device: local_device}) + elif self.rank == 1: + # worker1 hosts the model and runs forward. The forward functions + # calls RRef.to_here(), hence needs to configure the device map + options.set_device_map(input_src, {remote_device: local_device}) + elif self.rank == 2: + # worker2 will get the out RRef and call to_here() and hence, needs + # to configure device map. + options.set_device_map(model_dst, {local_device: remote_device}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + if self.rank == 0: + # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here() + # If to_here() is properly synchronized with forward(x) the results must be identical + # This test needs multiple iterations and significant batch size to simulate real + # training of a CNN of MNIST-like data. 
+ # see https://github.com/pytorch/pytorch/issues/54771 + rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,)) + for _ in range(10): + rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device)) + rref_out = rref.remote().forward(rref_input, True) + out = rpc.remote( + out_relay, + TensorPipeAgentCudaRpcTest._rref_relay, + args=(rref_out,) + ).to_here() + expected = rref.rpc_sync().forward(rref_input, True) + self.assertEqual(out, expected) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_rref_forward_synchronization1(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization2(self): + self._test_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization3(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_rref_forward_synchronization4(self): + self._test_rref_forward_synchronization("cuda:1", "cuda:1") + + def _test_owner_rref_forward_synchronization(self, local_device, remote_device): + if self.rank == 0: + options = self.rpc_backend_options + options.set_device_map("w0", {local_device: remote_device}) + rpc.init_rpc( + "w0", + rank=0, + world_size=1, + rpc_backend_options=options + ) + + model = rpc.remote( + "w0", torch.nn.Linear, (2048, 20000) + ).remote().to(remote_device) + for _ in range(30): + data = torch.rand(2048, 2048).to(local_device) + output = model.rpc_sync().forward(data) + # to_here() internally calls localValue as the caller is + # the owner of the RRef. + v0 = rpc.RRef(output).remote().sum().to_here().item() + v1 = output.sum().item() + self.assertEqual(v0, v1) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_owner_rref_forward_synchronization1(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization2(self): + self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization3(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0") + + @skip_if_lt_x_gpu(2) + def test_owner_rref_forward_synchronization4(self): + self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1") + + @staticmethod + def _return_tensor_view(i): + x = torch.ones(1000, 200).cuda(0) * i + torch.cuda._sleep(10 * FIFTY_MIL_CYCLES) + # serialization of the return value will create a new tensor from the + # view, which is done outside of the user function. 
+ return x.split(100)[0] + + @skip_if_lt_x_gpu(1) + def test_tensor_view_as_return_value(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + futs = [] + for i in range(5): + futs.append(rpc.rpc_async( + dst, + TensorPipeAgentCudaRpcTest._return_tensor_view, + args=(i,) + )) + + for i in range(5): + self.assertEqual(torch.ones(100, 200) * i, futs[i].wait()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected source devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {0 : 0}) + options.set_devices([1]) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(2) + def test_devices_option_mismatch_reverse(self): + with self.assertRaisesRegex( + ValueError, + "Node worker0 has unexpected target devices in its device map for worker1" + ): + dst = worker_name((self.rank + 1) % self.world_size) + + options = rpc.TensorPipeRpcBackendOptions( + init_method=self.rpc_backend_options.init_method, + num_worker_threads=self.rpc_backend_options.num_worker_threads, + device_maps={dst: {0 : 1}}, + devices=[0] + ) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_int(self): + fut = Future(devices=[0]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_str(self): + fut = Future(devices=["cuda:0"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_as_device(self): + fut = Future(devices=[torch.device("cuda", 0)]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_device_not_cuda(self): + with self.assertRaisesRegex( + ValueError, "Expected devices to have indices, got cpu" + ): + fut = Future(devices=["cpu"]) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=False + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=False + ) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_callback_changes_devices(self): + # We check proper CUDA stream synchronization by filling the tensor with + # the expected value in one stream, and reading it from another stream. 
+ tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:0", "cuda:1"]) + + def cb(fut): + t0 = fut.value() + tensor1.copy_(t0, non_blocking=True) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(2) + def test_cuda_future_value_on_bad_device(self): + tensor0 = torch.zeros((100,), device="cuda:0") + tensor1 = torch.zeros((100,), device="cuda:1") + parent_future = Future(devices=["cuda:1"]) + + # As a plus, we test that futures still invoke callbacks even in case of + # error, and that the child futures are successful if those callbacks + # don't access the parent future. + def cb(fut): + with torch.cuda.device("cuda:1"): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor1.fill_(1) + return tensor1 + + child_future = parent_future.then(cb) + with torch.cuda.device("cuda:0"): + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + torch.cuda._sleep(int(1000 * get_cycles_per_ms())) + tensor0.fill_(1) + parent_future.set_result(tensor0) + with self.assertRaisesRegex( + ValueError, + r"The result contained tensors residing on device\(s\) cuda:0 " + r"which are not among the expected device\(s\) cuda:1", + ): + parent_future.wait() + with torch.cuda.device("cuda:1"): + another_stream = torch.cuda.Stream() + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(child_future.wait(), 1).all().item()) + + @skip_if_lt_x_gpu(1) + def test_async_execution_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + t = torch.zeros((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 1).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_async_execution_nested_with_cuda_future(self): + dst = worker_name((self.rank + 1) % self.world_size) + nested_dst = worker_name((self.rank + 2) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + a = torch.ones((100,), device="cuda:0") + b = torch.ones((100,), device="cuda:0") + c = torch.ones((100,), device="cuda:0") + fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c)) + another_stream = torch.cuda.Stream("cuda:0") + with torch.cuda.stream(another_stream): + self.assertTrue(torch.eq(fut.wait(), 3).all().item()) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_modify_tensor_inplace(self): + tensor = torch.zeros((100,), device="cuda:0") + future = Future(devices=["cuda:0"]) + future.set_result(tensor) + # It's weird to modify the 
value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + tensor.fill_(1) + future.wait() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_replace_tensor(self): + tensor_list = [torch.zeros((100,), device="cuda:0")] + future = Future(devices=["cuda:0"]) + future.set_result(tensor_list) + # It's weird to modify the value of a future once it's complete, but + # technically possible. Currently this is considered undefined behavior + # (in practice the future will ignore the modification and still + # synchronize with the original value). We could one day add logic to + # detect and warn or throw in such cases, but for now we just check that + # this doesn't crash. + # We set things up so that the original tensor contained in the list + # gets deleted once we replace it with the other one. This will + # invalidate any cached information held by the future. + tensor_list[0] = torch.ones((100,), device="cuda:0") + future.wait() + + @skip_if_lt_x_gpu(1) + def test_rref_with_unpickleable_attributes(self): + dst = worker_name((self.rank + 1) % self.world_size) + options = self.rpc_backend_options + options.set_device_map(dst, {"cuda:0": "cuda:0"}) + + rpc.init_rpc( + name=worker_name(self.rank), + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=options, + ) + + rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),)) + rref.rpc_sync().increase(1) + ret = rref.rpc_sync().sum() + self.assertEqual(ret, 42) + + rpc.shutdown() + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=True + ) + + @skip_if_lt_x_gpu(1) + def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self): + self._test_cuda_future_extraction( + wrapper=TensorWrapper, unwrapper=lambda v: v.tensor, sparse_tensor=True + ) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py new file mode 100644 index 0000000000000000000000000000000000000000..629b0328f1aed6d14d71589faee40a9b702a1b22 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py @@ -0,0 +1,34 @@ +# mypy: allow-untyped-defs + +import torch.distributed.rpc as rpc +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.common_distributed import ( + tp_transports, +) + + +class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture): + @property + def rpc_backend(self): + return rpc.backend_registry.BackendType[ + "TENSORPIPE" + ] + + @property + def rpc_backend_options(self): + return rpc.backend_registry.construct_rpc_backend_options( + self.rpc_backend, + init_method=self.init_method, + _transports=tp_transports() + ) 
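# --------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the upstream patch): the
# TensorPipeAgentCudaRpcTest cases above all repeat the same device-map
# setup before exercising CUDA-over-RPC. The hypothetical helper below
# condenses that pattern into one place; the helper name, worker names, and
# init_method address are placeholders, and it assumes a second process
# joins the group as "worker1" with the mirrored device map.

def _example_device_map_setup():
    import torch
    import torch.distributed.rpc as rpc

    options = rpc.TensorPipeRpcBackendOptions(
        init_method="tcp://127.0.0.1:23456",  # placeholder rendezvous address
        num_worker_threads=8,
        # Tensors sent from this worker's cuda:0 land on worker1's cuda:1.
        device_maps={"worker1": {0: 1}},
    )
    # Maps can also be added one entry at a time:
    # options.set_device_map("worker1", {1: 0})

    rpc.init_rpc(
        name="worker0",
        rank=0,
        world_size=2,
        rpc_backend_options=options,
    )
    # With the map in place, CUDA tensors are legal RPC arguments; without it
    # the agent raises "TensorPipe RPC backend only supports CPU tensors by
    # default", which is what _test_device_maps_missing_config above checks.
    ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2).to(0), 1))
    rpc.shutdown()
    return ret
# --------------------------------------------------------------------------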
+ + def get_shutdown_error_regex(self): + # FIXME Once we consolidate the error messages returned by the + # TensorPipe agent put some more specific regex here. + error_regexes = [".*"] + return "|".join([f"({error_str})" for error_str in error_regexes]) + + def get_timeout_error_regex(self): + return "RPC ran for more than" diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9b714c77aa998ea68f3bb5a95ed03d3beb802f3a --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py @@ -0,0 +1,181 @@ +# mypy: allow-untyped-defs + +import os +import sys +import unittest +from typing import Dict, List, Type + +from torch.testing._internal.common_distributed import MultiProcessTestCase +from torch.testing._internal.common_utils import ( + TEST_WITH_DEV_DBG_ASAN, + find_free_port, + IS_SANDCASTLE, +) +from torch.testing._internal.distributed.ddp_under_dist_autograd_test import ( + CudaDdpComparisonTest, + DdpComparisonTest, + DdpUnderDistAutogradTest, +) +from torch.testing._internal.distributed.nn.api.remote_module_test import ( + CudaRemoteModuleTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, +) +from torch.testing._internal.distributed.rpc.dist_autograd_test import ( + DistAutogradTest, + CudaDistAutogradTest, + FaultyAgentDistAutogradTest, + TensorPipeAgentDistAutogradTest, + TensorPipeCudaDistAutogradTest +) +from torch.testing._internal.distributed.rpc.dist_optimizer_test import ( + DistOptimizerTest, +) +from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import ( + JitDistAutogradTest, +) +from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest +from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import ( + JitFaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import ( + FaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_test import ( + CudaRpcTest, + RpcTest, + TensorPipeAgentRpcTest, + TensorPipeAgentCudaRpcTest, +) +from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest +from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import ( + ReinforcementLearningRpcTest, +) + + +def _check_and_set_tcp_init(): + # if we are running with TCP init, set main address and port + # before spawning subprocesses, since different processes could find + # different ports. + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + os.environ["MASTER_ADDR"] = '127.0.0.1' + os.environ["MASTER_PORT"] = str(find_free_port()) + +def _check_and_unset_tcp_init(): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + del os.environ["MASTER_ADDR"] + del os.environ["MASTER_PORT"] + +# The tests for the RPC module need to cover multiple possible combinations: +# - different aspects of the API, each one having its own suite of tests; +# - different agents (ProcessGroup, TensorPipe, ...); +# To avoid a combinatorial explosion in code size, and to prevent forgetting to +# add a combination, these are generated automatically by the code in this file. 
+# Here, we collect all the test suites that we need to cover. +# We then have one separate file for each agent, from which +# we call the generate_tests function of this file, passing to it a fixture for +# the agent, which then gets mixed-in with each test suite. + +@unittest.skipIf( + TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues" +) +class SpawnHelper(MultiProcessTestCase): + def setUp(self): + super().setUp() + _check_and_set_tcp_init() + self._spawn_processes() + + def tearDown(self): + _check_and_unset_tcp_init() + super().tearDown() + + +# This list contains test suites that are agent-agnostic and that only verify +# compliance with the generic RPC interface specification. These tests should +# *not* make use of implementation details of a specific agent (options, +# attributes, ...). These test suites will be instantiated multiple times, once +# for each agent (except the faulty agent, which is special). +GENERIC_TESTS = [ + RpcTest, + ParameterServerTest, + DistAutogradTest, + DistOptimizerTest, + JitRpcTest, + JitDistAutogradTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, + DdpUnderDistAutogradTest, + DdpComparisonTest, + ReinforcementLearningRpcTest, +] +GENERIC_CUDA_TESTS = [ + CudaRpcTest, + CudaDistAutogradTest, + CudaRemoteModuleTest, + CudaDdpComparisonTest, +] + + +# This list contains test suites that will only be run on the TensorPipeAgent. +# These suites should be standalone, and separate from the ones in the generic +# list (not subclasses of those!). +TENSORPIPE_TESTS = [ + TensorPipeAgentRpcTest, + TensorPipeAgentDistAutogradTest, +] +TENSORPIPE_CUDA_TESTS = [ + TensorPipeAgentCudaRpcTest, + TensorPipeCudaDistAutogradTest, +] + + +# This list contains test suites that will only be run on the faulty RPC agent. +# That agent is special as it's only used to perform fault injection in order to +# verify the error handling behavior. Thus the faulty agent will only run the +# suites in this list, which were designed to test such behaviors, and not the +# ones in the generic list. +FAULTY_AGENT_TESTS = [ + FaultyAgentRpcTest, + FaultyAgentDistAutogradTest, + JitFaultyAgentRpcTest, +] + + +def generate_tests( + prefix: str, + mixin: Type[RpcAgentTestFixture], + tests: List[Type[RpcAgentTestFixture]], + module_name: str, +) -> Dict[str, Type[RpcAgentTestFixture]]: + """Mix in the classes needed to autogenerate the tests based on the params. + + Takes a series of test suites, each written against a "generic" agent (i.e., + derived from the abstract RpcAgentTestFixture class), as the `tests` args. + Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a + certain agent, as the `mixin` arg. Produces all combinations of them. + Returns a dictionary of class names to class type + objects which can be inserted into the global namespace of the calling + module. The name of each test will be a concatenation of the `prefix` arg + and the original name of the test suite. + The `module_name` should be the name of the calling module so + that the classes can be fixed to make it look like they belong to it, which + is necessary for pickling to work on them. 
+ """ + ret: Dict[str, Type[RpcAgentTestFixture]] = {} + for test_class in tests: + if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN: + print( + f'Skipping test {test_class} on sandcastle for the following reason: ' + 'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr) + continue + + name = f"{prefix}{test_class.__name__}" + class_ = type(name, (test_class, mixin, SpawnHelper), {}) + class_.__module__ = module_name + ret[name] = class_ + return ret diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95889fabcdb01c5a6b3da7d9dbf9b9a07e66805f Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97c38f3560625213fbd59d09a9cfd22bad26ba04 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py @@ -0,0 +1,4 @@ +# mypy: ignore-errors + +import torch.testing._internal.opinfo.core +import torch.testing._internal.opinfo.definitions diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa487e96fbe1a338037cab631d228343d69a64ce Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61f3bc862f6d0ee4d0fcdc11832a303eb8e308d2 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5541209a2996598ce03d3b257a17956dc9a5541c Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d164f139e5cf6821dec4de75ccb8ceaa2434e53 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/utils.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py 
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa38511d4e971b092ca19f74adf33f997ff4d7a --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py @@ -0,0 +1,2944 @@ +# mypy: ignore-errors + +import collections +import collections.abc +import math +import operator +import unittest +from dataclasses import asdict, dataclass +from enum import Enum +from functools import partial +from itertools import product +from typing import Any, Callable, Iterable, List, Optional, Tuple, Union + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + skipCPUIfNoFFT, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, + get_all_dtypes, +) +from torch.testing._internal.common_utils import ( + is_iterable_of_tensors, + noncontiguous_like, + OPINFO_SAMPLE_INPUT_INDEX, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, + TrackedInputIter, +) +from torch.testing._internal.opinfo import utils +from torchgen.utils import dataclass_repr + + +# Reasonable testing sizes for dimensions +L = 20 +M = 10 +S = 5 +XS = 3 + +# Unique value to distinguish default from anything else +_NOTHING = object() + + +# Extension of getattr to support qualified names +# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm +def _getattr_qual(obj, name, default=_NOTHING): + try: + for path in name.split("."): + obj = getattr(obj, path) + return obj + except AttributeError: + if default is not _NOTHING: + return default + else: + raise + + +class DecorateInfo: + """Describes which test, or type of tests, should be wrapped in the given + decorators when testing an operator. Any test that matches all provided + arguments will be decorated. The decorators will only be applied if the + active_if argument is True.""" + + __slots__ = [ + "decorators", + "cls_name", + "test_name", + "device_type", + "dtypes", + "active_if", + ] + + def __init__( + self, + decorators, + cls_name=None, + test_name=None, + *, + device_type=None, + dtypes=None, + active_if=True, + ): + self.decorators = ( + list(decorators) + if isinstance(decorators, collections.abc.Sequence) + else [decorators] + ) + self.cls_name = cls_name + self.test_name = test_name + self.device_type = device_type + self.dtypes = dtypes + self.active_if = active_if + + # Validate dtypes + if self.dtypes is not None: + for dtype in self.dtypes: + assert isinstance(dtype, torch.dtype) + + def is_active(self, cls_name, test_name, device_type, dtype, param_kwargs): + return ( + self.active_if + and (self.cls_name is None or self.cls_name == cls_name) + and (self.test_name is None or self.test_name == test_name) + and (self.device_type is None or self.device_type == device_type) + and (self.dtypes is None or dtype in self.dtypes) + # Support callables over kwargs to determine if the decorator is active. + and ( + self.active_if(param_kwargs) + if isinstance(self.active_if, Callable) + else self.active_if + ) + ) + + +# FIXME +# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying +# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor +# or TensorList, however. 
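# --------------------------------------------------------------------------
# Editor's note (minimal sketch, not part of the upstream patch): how a
# DecorateInfo is typically declared and matched. The decorator, class/test
# names, and dtypes below are illustrative placeholders; operator entries
# defined later in this file carry lists of such objects.

def _example_decorate_info_usage():
    di = DecorateInfo(
        unittest.skip("flaky on this backend"),  # decorator(s) to apply
        cls_name="TestCommon",                   # only tests in this class ...
        test_name="test_out",                    # ... with this name ...
        device_type="cuda",                      # ... on this device type ...
        dtypes=(torch.float16,),                 # ... and these dtypes
    )
    # is_active() decides whether the decorator should wrap a particular
    # instantiated test; every constraint that was provided must match.
    assert di.is_active("TestCommon", "test_out", "cuda", torch.float16, {})
    assert not di.is_active("TestCommon", "test_out", "cpu", torch.float16, {})
# --------------------------------------------------------------------------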
+class SampleInput: + """Represents sample inputs to a function.""" + + __slots__ = [ + "input", + "args", + "kwargs", + "output_process_fn_grad", + "broadcasts_input", + "name", + ] + + def __init__( + self, + input, + *var_args, + args=None, + kwargs=None, + output_process_fn_grad=None, + broadcasts_input=None, + name=None, + **var_kwargs, + ): + # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]). + # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...). + self.input = input + + # Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as + # SampleInput(input, *args, **kwargs) but not to mix the two forms + if args is not None or kwargs is not None: + assert ( + not var_args and not var_kwargs + ), """ +A SampleInput can be constructed "naturally" with *args and **kwargs or by +explicitly setting the "args" and "kwargs" parameters, but the two +methods of construction cannot be mixed!""" + elif len(var_args) or len(var_kwargs): + assert ( + output_process_fn_grad is None + and broadcasts_input is None + and name is None + ), """ +A SampleInput constructed "naturally" with *args and **kwargs +cannot specify additional metadata in keyword arguments""" + + self.args = args if args is not None else var_args + assert isinstance(self.args, tuple) + self.kwargs = kwargs if kwargs is not None else var_kwargs + assert isinstance(self.kwargs, dict) + + self.output_process_fn_grad = ( + output_process_fn_grad + if output_process_fn_grad is not None + else lambda x: x + ) + self.name = name if name is not None else "" + + # Specifies if `self.input` is broadcasted or not, + # given that the operator supports broadcasting. + # This field is used to verify the behavior for inplace variant. + # + # If a SampleInput is marked with `broadcasts_input=True`, + # it is verified that we get a `RuntimeError` with this sample, + # and inplace variant. Also inplace grad{grad} tests are skipped, + # for such inputs (as they will error out otherwise). + self.broadcasts_input = ( + broadcasts_input if broadcasts_input is not None else False + ) + + def with_metadata( + self, *, output_process_fn_grad=None, broadcasts_input=None, name=None + ): + if output_process_fn_grad is not None: + self.output_process_fn_grad = output_process_fn_grad + if broadcasts_input is not None: + self.broadcasts_input = broadcasts_input + if name is not None: + self.name = name + return self + + def _repr_helper(self, formatter): + # Helper function to return the details of the SampleInput as `str` + # It consolidates all the fields of SampleInput and allows, + # formatting the fields like `input`, `args`, etc with `formatter` + # callable to customize the representation. + # Look at `summary` method for example. + arguments = [ + f"input={formatter(self.input)}", + f"args={formatter(self.args)}", + f"kwargs={formatter(self.kwargs)}", + f"broadcasts_input={self.broadcasts_input}", + f"name={repr(self.name)}", + ] + + return f'SampleInput({", ".join(a for a in arguments if a is not None)})' + + def __repr__(self): + return self._repr_helper(lambda x: x) + + def summary(self): + # Returns the SampleInput details in a more + # friendly format. + # It formats `Tensor` and `TensorList` + # in a more condensed representation. + def formatter(arg): + # Format any instance of `Tensor` (standalone, in list, or in dict) + # by Tensor[TensorShape] + # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4] + if isinstance(arg, torch.Tensor): + shape = str(tuple(arg.shape)) + dtype = str(arg.dtype) + device = str(arg.device) + contiguity_suffix = "" + # NB: sparse CSR tensors annoyingly return is_sparse=False + is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr + if not is_sparse and not arg.is_contiguous(): + contiguity_suffix = ", contiguous=False" + return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]' + elif isinstance(arg, dict): + return {k: formatter(v) for k, v in arg.items()} + elif is_iterable_of_tensors(arg): + return "TensorList[" + ", ".join(map(formatter, arg)) + "]" + elif isinstance(arg, (list, tuple)): # Handle list, tuple + return "(" + ",".join(map(formatter, arg)) + ")" + + return repr(arg) + + return self._repr_helper(formatter) + + # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput + def transform(self, f): + def tt(t): + def _tt(t): + with torch.no_grad(): + return f(t) + + if isinstance(t, torch.Tensor): + return _tt(t) + elif isinstance(t, torch.dtype): + return _tt(t) + elif isinstance(t, list): + return list(map(tt, t)) + elif isinstance(t, tuple): + return tuple(map(tt, t)) + elif isinstance(t, dict): + return {k: tt(v) for k, v in t.items()} + else: + return t + + sample_tt_input, tt_args, tt_kwargs = ( + tt(self.input), + tt(self.args), + tt(self.kwargs), + ) + + # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! + return SampleInput( + sample_tt_input, + args=tt_args, + kwargs=tt_kwargs, + output_process_fn_grad=self.output_process_fn_grad, + broadcasts_input=self.broadcasts_input, + name=self.name + "_transformed", + ) + + # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) + # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them + # Converts dtypes by remapping them using torch_to_numpy_dtype_dict + def numpy(self): + def to_numpy(t): + if isinstance(t, torch.Tensor): + if t.dtype is torch.bfloat16: + return t.detach().cpu().to(torch.float32).numpy() + if t.dtype is torch.chalf: + return t.detach().cpu().to(torch.cfloat).numpy() + return t.detach().cpu().numpy() + elif isinstance(t, torch.dtype): + return torch_to_numpy_dtype_dict[t] + + return t + + return self.transform(to_numpy) + + def noncontiguous(self): + def to_noncontiguous(t): + if isinstance(t, torch.Tensor): + return noncontiguous_like(t) + elif isinstance(t, torch.dtype): + return t + + return t + + return self.transform(to_noncontiguous) + + +NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"]) + + +class ErrorInput: + """ + A SampleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["sample_input", "error_type", "error_regex"] + + def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): + self.sample_input = sample_input + self.error_type = error_type + self.error_regex = error_regex + + +class AliasInfo: + """Class holds alias information. 
For example, torch.abs -> + torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_ + """ + + def __init__(self, alias_name): + self.name = alias_name + self.op = _getattr_qual(torch, alias_name) + self.method_variant = getattr(torch.Tensor, alias_name, None) + self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None) + + def __call__(self, *args, **kwargs): + return self.op(*args, **kwargs) + + +# Note [OpInfos] +# ~~~~~~~~~~~~~~ +# +# The majority of this note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261) +# See also: "Writing Test Templates" in common_device_type.py to learn how to +# parametrize a test template using OpInfos. +# See also: PyTorch's GitHub wiki on running and writing tests +# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests +# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py +# +# An OpInfo is a collection of metadata related to a PyTorch operator. This +# metadata is used to generate tests that validate properties of the operator, +# like if it implements the correct gradient formula. +# +# WHY OPINFOS? +# ~~~~~~~~~~~~ +# +# OpInfos are principally intended to do three things: +# +# 1) to allow systematic testing over all PyTorch's operators +# 2) to simplify operating testing by autogenerating many tests +# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test +# against every PyTorch operator +# +# All these goals are still a work in progress. Not every operator has an +# OpInfo, and some operator tests that could be automatically generated +# still have to be written manually. +# +# It's helpful to understand that OpInfos are both about test simplification and +# modularity. PyTorch is a complicated framework with many interrelated systems, +# too many for any one person to keep track of. An OpInfo can be thought of as the +# interface between an operator implementer and those other systems. Instead of +# requiring the implementer of torch.foo understand how to test its forward +# mode AD or NNC support that's typically handled automatically just by +# defining an OpInfo. +# +# It's often surprising to OpInfo writers that just implementing an OpInfo +# typically can't verify an operator is actually implemented correctly: +# +# "If an OpInfo doesn't validate my op works as expected, what's the point +# of it?" +# +# But the point of is the above. OpInfos are intended to let you focus on testing +# the operator logic you're familiar with instead of having to write tests for +# how the operator interacts with each of PyTorch's many systems. +# +# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES +# validate your op works as expected, but that's only in special +# cases. See below for details. +# +# WHAT'S AN OPINFO? +# ~~~~~~~~~~~~~~~~~ +# +# So what is an OpInfo? It's a Python class that describes an operator's properties, +# like which dtypes it supports on the CPU and whether it has any aliases. +# These properties can be divided into three categories: +# +# 1) Metadata describing the operator, like the operator's name and if it +# "supports" the out kwarg. +# 2) Test directives, like "skips" that tell the test suite to skip some +# tests. +# 3) A "sample inputs" function that generates valid inputs for the operator. +# +# OpInfo attributes are described in more detail below. 
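#
# For concreteness, here is a minimal sketch tying those three categories together
# ("add" is used only because it is a simple, real operator; the skip entry and the
# exact dtypes are illustrative assumptions). The signature convention for the
# sample inputs function is detailed in the next section.
#
#   import unittest
#   from functools import partial
#   import torch
#   from torch.testing import make_tensor
#   from torch.testing._internal.common_dtype import floating_types
#   from torch.testing._internal.opinfo.core import (
#       DecorateInfo, OpInfo, S, SampleInput,
#   )
#
#   def sample_inputs_add(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(
#           make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
#       )
#       # 3) sample inputs: SampleInput(input, args=...) is called as op(input, *args)
#       yield SampleInput(make_arg((S, S)), args=(make_arg((S, S)),))
#
#   example_opinfo = OpInfo(
#       "add",                                 # 1) metadata: name, dtypes, aliases, ...
#       dtypes=floating_types(),
#       sample_inputs_func=sample_inputs_add,
#       skips=(                                # 2) test directives
#           DecorateInfo(unittest.skip("illustrative"), "TestCommon", "test_out"),
#       ),
#   )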
+# +# THE SAMPLE INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The "sample inputs" function merits special elaboration. This function is +# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator +# as a black box. There's no structure for the test to understand or exploit. +# Without "sample inputs" it wouldn't even know how to call the OpInfo's +# operator. The sample input function saves the day by providing different +# "SampleInputs" that can be used to call the operator. A sample input +# function should have the following signature: +# +# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs): +# +# And should return an iterable of SampleInputs (see the class description +# above). Each SampleInput defines an "input", "args", "kwargs", an +# "output_process_fn_grad" function, the "broadcasts_input" bool and a +# "name". +# +# All the "sample_inputs" functions are invoked within a `torch.no_grad()` +# environment for efficiency and correctness. As such remember to set the +# "requires_grad" flag on the inputs **after** performing any transformations +# on them. +# +# The "input" is the first argument to the operator, or the tensor that +# the method or inplace variants of the operator should be called on, and +# should be on the requested device, of the requested dtype, and its +# requires_grad attribute should be set to the requires_grad argument. +# +# "args" should contain positional arguments, and "kwargs" keyword arguments. +# +# "output_process_fn_grad" has an interesting name. It's a function that maps +# the operator's output (when given the input, args, and kwargs) to the +# portion of the output to gradcheck. For example, consider an operator +# like torch.linalg.slogdet +# (https://pytorch.org/docs/main/generated/torch.linalg.slogdet.html). +# This operator returns a tuple of two tensors, but the first tensor +# cannot be backwarded through. Its "output_process_fn_grad" filters +# this output tuple to just the second argument, which we can call backward +# on. Functions that produce a single tensor can ignore this argument. +# +# "broadcasts_input" is a bool indicated if the SampleInput causes the operator +# to broadcast the "input" argument. This is important for tests to understand +# because inplace variants of operations throw a runtime error if they +# would broadcast their input arguments, so tests that work with inplace +# variants filter SampleInputs that broadcast their input. +# +# "name" is a string that's just used for debugging. It appears when printing +# the SampleInput. +# +# Sample inputs are designed to be used with many tests, some +# that are very time consuming, so they should be a small +# set with small tensors. An elaborated set of sample inputs +# can be specified using the "reference_inputs_func" attribute. +# The "reference inputs" for an operation are an extended +# set of sample inputs that can more exhausively test an +# operator. They are used by only a few tests that are careful +# not to take too long to run. Adding reference inputs +# is highly encouraged! +# +# THE (OPTIONAL) ERROR INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# OpInfos may optionally specify "error inputs" through an error function. If +# specified test_errors in test_ops.py will call the op with these inputs +# and validate that the desired error is thrown. +# +# Error inputs automate a common testing pattern where multiple inputs are +# passed to an operation and the errors they thrown are reviewed. 
Tests +# written in this style should be ported to the new OpInfo pattern. +# +# Error inputs are specified using the ErrorInputs class, which contains +# a SampleInput (see above) and data about the expected error. +# +# OPINFO FILE ORGANIZATION +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# +# All OpInfos are currently defined in this file. Most OpInfo tests are defined +# in test_ops.py, but some system-specific tests are defined in those +# systems' test files, and subclass-specific tests are defined in the test +# file that corresponds to that subclass (see the below). +# Expect a reorganization in the future. +# +# WHAT'S TESTED? +# ~~~~~~~~~~~~~~ +# +# Every OpInfo in the op_db sequence has the following properties validated in +# test_ops.py: +# +# - that its supported dtypes are specified correctly +# - that the operation produces the same results when called with noncontiguous inputs +# - that it supports the out= argument properly (if it allows out=), +# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch +# - that it works with the conjugate view bit properly +# - that its function, method, and inplace variants perform the same operation +# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all +# do the same thing). +# - that its inplace variant preserves the input's storage +# - that its gradient formula is implemented correctly, and that it supports +# gradgrad and complex grad and gradgrad and forward mode AD properly for +# the op's function and inplace variants (method variants are skipped +# to reduce test time). +# - that the operation performs the same operation when traced or scripted +# using the jit +# - that the operation is autodifferentiated by the jit as expected +# - that the operator's aliases, if any, perform the same operation and that +# the jit understands the alias +# - that the operator throws the correct errors (if error_inputs is defined) +# - that the operator produces the same results as a NumPy reference (if ref is defined) +# - that the operator produces the same results as a NumPy reference on an extended +# set of "reference inputs" (if both ref and reference_inputs_func are defined) +# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only +# ref is defined, because they effectively autogenerate reference inputs) +# - that the operator works on different CUDA devices +# +# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py, +# and test_fx.py. These tests validate that operators work with NNC and FX +# as expected. +# +# For performance, some of the above tests may only run on the first +# SampleInput returned by an OpInfo's sample input function. +# +# In addition to these tests, some subclasses (discussed in the next section) +# define additional tests. +# +# Critically, as mentioned above, what's not necessarily tested is that the operator +# works as expected. When implementing an OpInfo an engineer must still +# typically write one or more tests validating the operator's behavior. +# The exception to this is if reference testing is sufficient, or if +# the operation belongs to an OpInfo subclass that has more exhaustive +# operator testing. Elementwise unary and elementwise binary operators, +# in particular, usually don't require additional testing beyond +# writing an Opinfo. +# +# +# OPINFO (SUB)CLASSES +# ~~~~~~~~~~~~~~~~~~~ +# +# In addition to the OpInfo base class there are several specialized OpInfo +# subclasses. 
For example, the UnaryUfuncInfo subclass is used for +# unary elementwise operations. These operations have a common structure +# that test_unary_ufuncs.py exploits with additional automated testing. +# The automated testing in test_unary_ufuncs.py is so thorough, comparing +# the operator to a NumPy reference function on a plethora of values, that +# just implementing an OpInfo for a unary elementwise operation is often +# sufficient testing. +# +# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a +# very unique class of operations. These OpInfos aren't included in the +# op_db sequence and have their own tests. +# +# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience +# when writing OpInfos. +# +# TESTING A NEW OPERATOR +# ~~~~~~~~~~~~~~~~~~~~~~ +# +# If you're adding a new operator to any of the following namespaces: +# - torch +# - torch.fft +# - torch.linalg, +# - torch.special +# - torch.nn.functional +# then you should typically add an OpInfo for it. +# +# As mentioned a couple times above, implementing an OpInfo is not +# usually sufficient testing (unless the operator is a unary or binary elementwise +# operator). The OpInfo will only test the properties described in the +# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is +# implemented correctly. +# +# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to +# be consumed by a variety of systems it can be hard to understand how to +# deal with test failures or how to set the OpInfo metadata properly. +# +# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs +# function must be defined, and the operator's dtypes must be specified. +# Once that's done you should run the operator's tests in test_ops.py +# (these can be filtered using the "-k" argument in pytest). Tests that +# fail should provide an error message that describes what to change about +# your OpInfo. You don't need to worry about changing an OpInfo's default +# values unless a test yells at you. +# +# Similarly, if you're writing a test that consumes OpInfos then it's critical +# your test provides a clear error message describing what to do when it +# fails. You should not assume the OpInfo implementer is familiar with your +# system. +# +# If you see a confusing error message while developing an OpInfo then please +# file an issue describing what happened. +# +# This trial-and-error approach to writing an OpInfo can be frustrating, +# but it's probably necessary as long as OpInfos don't require +# learning about all the systems that consume them. One thing that can help +# is the get_supported_dtypes() function defined in utils.py. This +# function can be used to programmatically specify the dtypes an operator +# supports, and is especially useful if writing an OpInfo on a machine +# without a CUDA device. See its documentation for more details. +# +# THE FUTURE OF OPINFOS AND OPINFO TESTING +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# In the future we expect OpInfo coverage to improve and cover +# the great majority of PyTorch's (public) operators. +# + + +# Classes and methods for the operator database +@dataclass +class OpInfo: + """Operator information and helper functions for acquiring it.""" + + # the string name of the function + name: str + + # An optional reference function that accepts ndarrays (AKA "NumPy arrays"). 
+ # If given, the op will be compared with its reference on each of its sample inputs. + ref: Optional[Callable] = None + + # the following metadata describes the operator, its variants, and its aliases, if any + + # iterable of aliases, e.g. ("absolute",) for torch.abs + aliases: Iterable = None + + # additional string to include in the test name + # this is useful when an op needs multiple OpInfos, + # like divide does, often because it's really several + # different ops behind the scenes + variant_test_name: str = "" + + # the function variant of the operation, populated as torch. if None + op: Callable = None + + # allows the method variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated method + # - if a Callable, then that callable should be the method associated with this operation + method_variant: Callable = _NOTHING + + # allows the inplace variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated inplace variant + # - if a Callable, then that callable should be the inplace variant associated with this operation + inplace_variant: Callable = _NOTHING + + # allows the operator variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated operator + # - if a Callable, then that callable should be the operator associated with this operation + operator_variant: Callable = _NOTHING + + # allows the inplace operator variant of this operation to be specified as follows: + # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name + # - if None, then the OpInfo explicitly specifies is has no associated inplace operator + # - if a Callable, then that callable should be the inplace operator associated with this operation + inplace_operator_variant: Callable = _NOTHING + + # the following metadata are test directives for skipping or modifying tests + + # information about which tests to skip + skips: Tuple = () + + # decorators to apply to generated tests + decorators: Tuple = () + + # the following are pointers to functions to generate certain classes of inputs + + # function to generate sample inputs with strided layouts + sample_inputs_func: Callable = None + + # function to generate a more thorough set of samples inputs with strided layouts + reference_inputs_func: Callable = None + + # function to generate inputs that will throw errors + error_inputs_func: Callable = None + + # function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors + error_inputs_sparse_func: Callable = None + + # function to generate sample inputs with sparse coo layouts + sample_inputs_sparse_coo_func: Callable = None + + # function to generate sample inputs with sparse csr layouts + sample_inputs_sparse_csr_func: Callable = None + + # function to generate sample inputs with sparse csc layouts + sample_inputs_sparse_csc_func: Callable = None + + # function to generate sample inputs with sparse bsr layouts + sample_inputs_sparse_bsr_func: Callable = None + + # function to generate sample inputs with sparse bsc layouts + sample_inputs_sparse_bsc_func: Callable = None + + # the 
following metadata relates to dtype support and is tested for correctness in test_ops.py + + # dtypes this function works with on the CPU, + # inherited by other device types that don't specify their own dtypes + dtypes: _dispatch_dtypes = None + + # the following dtypesIf... options override the dtypes value on their respective device types + + # dtypes this function is expected to work with on CUDA + dtypesIfCUDA: _dispatch_dtypes = None + + # dtypes this function is expected to work with on ROCM + dtypesIfROCM: _dispatch_dtypes = None + + dtypesIfHpu: _dispatch_dtypes = None + + # dtypes this function is expected to work with on XPU + dtypesIfXPU: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with + backward_dtypes: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with on CUDA + backward_dtypesIfCUDA: _dispatch_dtypes = None + + # backward dtypes this function is expected to work with on ROCM + backward_dtypesIfROCM: _dispatch_dtypes = None + + backward_dtypesIfHpu: _dispatch_dtypes = None + + # the following metadata describes the operators out= support + + # whether the op supports the out kwarg + # defaults to True, if the op does not allow the out kwarg or + # supports it incorrectly then test_out in test_ops.py should fail + supports_out: bool = True + + # the following metadata relates to autograd support + # whether the operation supports backward mode AD + # if true, gradient correctness is tested in test_ops.py + # using the op's sample inputs + supports_autograd: bool = True + + # whether the op supports second order gradients + # if true, gradgrad correctness is tested in test_ops.py + # defaults to support_autograd's value + # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below + supports_gradgrad: bool = None + + # whether the ops supports second order gradients via + # forward-over-reverse. If True, forward-over-reverse gradgrad correctness + # is tested. If False, test that forward grad is not implemented. + # Defaults to False. + supports_fwgrad_bwgrad: bool = False + + # whether the operation supports inplace autograd + # if true, tested in test_ops.py + # defaults to supports_autograd's value + supports_inplace_autograd: bool = None + + # Whether the operation support forward mode AD + # If the value is True, we check that the gradients are correct + # If the value is False, we test that forward grad is not implemented + supports_forward_ad: bool = False + + # Whether the operation has a varargs variant + # (e.g. 
functions like ones, zeros, methods like view, permute) + supports_varargs: bool = False + + # Whether the forward operation avoids materializing COW tensor inputs + supports_cow_input_no_materialize_forward: bool = True + + # Whether the backward operation avoids materializing COW tensor inputs + supports_cow_input_no_materialize_backward: bool = True + + # Whether to skip the backward part of the COW tensor input test + skip_cow_input_backward: bool = False + + # If `supports_cow_input_no_materialize_forward == True`, this list contains + # the arg indices or kwarg names of inputs that are expected to materialize + allow_cow_input_materialize_forward: List[Union[int, str]] = None + + # If `supports_cow_input_no_materialize_backward == True`, this list contains + # the arg indices or kwarg names of inputs that are expected to materialize + allow_cow_input_materialize_backward: List[Union[int, str]] = None + + # wrapper function for gradcheck + gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs) + + # whether to check batched grad when doing gradcheck + # defaults to support_autograd's value + check_batched_grad: bool = None + + # whether to check batched grad grad when doing gradgradcheck + # default's to support_gradgrad's value + check_batched_gradgrad: bool = None + + # whether to check batched forward grad when doing gradcheck + # defaults to the value of `supports_forward_ad` + check_batched_forward_grad: bool = None + + # whether to check batched forward grad when doing gradcheck + # defaults to the value of `check_batched_forward_grad` + check_inplace_batched_forward_grad: bool = None + + # tolerance for nondeterminism while performing gradcheck + gradcheck_nondet_tol: float = 0.0 + + # Whether to use the fast implmentation for gradcheck/gradgradcheck. + # When set to None, defers to the default value provided by the wrapper + # function around gradcheck (testing._internal.common_utils.gradcheck) + gradcheck_fast_mode: bool = None + + # the following metadata relates to JIT support and is tested for correctness in test_ops.py + + # name of the corresponding aten:: operator + aten_name: str = None + + # if this is a composite implicit autograd op, the decomposed op + decomp_aten_name: Optional[str] = None + + # name of the corresponding aten:: operator for backwards + aten_backward_name: Optional[str] = None + + # if a op's aten::node is expected to be symbolically autodiffed + assert_autodiffed: bool = False + + # a list of strings with node names that are expected to be in a + # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'], + # default is populated to be ['aten::(name of Python operator)'] + autodiff_nonfusible_nodes: List[str] = None + + # a list of strings with node names that are expected to be in FusionGroups + # inside of DifferentiableGraphs when this operation is autodiffed. 
+ # Ex: ['aten::add', 'aten::mm'], defaults to an empty list + # Note: currently no ops use fusible nodes + autodiff_fusible_nodes: List[str] = None + + # the following metadata relates to sparse support and is used in test_sparse.py + + # whether the op supports sparse coo inputs, defaults to False + # TODO: rename supports_sparse to supports_sparse_coo + supports_sparse: bool = None + + # only run tracing tests + supports_scripting: bool = True + + # if the operator can be traced + supports_tracing: bool = True + + # the following metadata relates to sparse compressed support and + # is used in test_sparse_csr.py and test_sparse.py + + # whether the op supports sparse csr inputs, defaults to False + supports_sparse_csr: bool = None + # whether the op supports sparse csc inputs, defaults to False + supports_sparse_csc: bool = None + # whether the op supports sparse bsr inputs, defaults to False + supports_sparse_bsr: bool = None + # whether the op supports sparse bsc inputs, defaults to False + supports_sparse_bsc: bool = None + # whether the op supports nested jagged inputs, defaults to False + supports_njt: bool = None + + # whether the op promotes integer inputs to float + promotes_int_to_float: bool = False + + # the following metadata relates to complex support and is checked in test_ops.py + + test_conjugated_samples: bool = True + + test_neg_view: bool = True + + # assert that jit shape analysis fully propagates shape + assert_jit_shape_analysis: bool = False + + # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py + + supports_expanded_weight: bool = False + + is_factory_function: bool = False + + def __post_init__(self): + self._original_opinfo_args = asdict(self).copy() + + assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!" + + dtypes_args = ( + self.dtypes, + self.dtypesIfCUDA, + self.dtypesIfROCM, + self.dtypesIfXPU, + ) + + # Validates the dtypes are generated from the dispatch-related functions + for dtype_list in dtypes_args: + assert isinstance(dtype_list, (_dispatch_dtypes, type(None))) + + if self.aten_name is None: + self.aten_name = self.name + + # Attribute to verify dynamic_dtypes are used. + self.dynamic_dtypes = any( + isinstance(dtypes, utils._dynamic_dispatch_dtypes) for dtypes in dtypes_args + ) + + if self.dynamic_dtypes: + # Make sure `dtyesIfCUDA` is dynamic, if dynamic dispatch is used for CPU + # This is because, below we set dtypesIfCUDA to dtypes if they are None. + assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), ( + f"To use dynamic dypes for operator {self.name}, " + "acquire the dtypes dynamically for argument `dtypesIfCUDA`." + "This is to ensure that CUDA dtypes are acquired correctly as they" + "differ from CPU dtypes occasionally" + ) + + self.dtypes = set(self.dtypes) + + # NOTE: backward dtypes must be acquired before forward dtypes + # since they fallback to explicit (not implicit!) 
specifications of + # forward dtypes + self.backward_dtypesIfROCM = ( + set(self.backward_dtypesIfROCM) + if self.backward_dtypesIfROCM is not None + else ( + self.backward_dtypesIfCUDA + if self.backward_dtypesIfCUDA is not None + else self.backward_dtypes + if self.backward_dtypes is not None + else self.dtypesIfROCM + if self.dtypesIfROCM is not None + else self.dtypesIfCUDA + if self.dtypesIfCUDA is not None + else self.dtypes + ) + ) + self.backward_dtypesIfCUDA = ( + set(self.backward_dtypesIfCUDA) + if self.backward_dtypesIfCUDA is not None + else ( + self.backward_dtypes + if self.backward_dtypes is not None + else self.dtypesIfCUDA + if self.dtypesIfCUDA is not None + else self.dtypes + ) + ) + self.backward_dtypesIfHpu = ( + set(self.backward_dtypesIfHpu) + if self.backward_dtypesIfHpu is not None + else ( + self.backward_dtypes + if self.backward_dtypes is not None + else self.dtypes + ) + ) + + self.backward_dtypes = ( + set(self.backward_dtypes) + if self.backward_dtypes is not None + else self.dtypes + ) + + self.dtypesIfCUDA = ( + set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes + ) + self.dtypesIfROCM = ( + set(self.dtypesIfROCM) + if self.dtypesIfROCM is not None + else self.dtypesIfCUDA + ) + self.dtypesIfXPU = ( + set(self.dtypesIfXPU) if self.dtypesIfXPU is not None else self.dtypesIfCUDA + ) + + self.dtypesIfHpu = ( + set(self.dtypesIfHpu) if self.dtypesIfHpu is not None else self.dtypes + ) + + # NOTE: if the op is unspecified it is assumed to be under the torch namespace + if not self.op: + self.op = _getattr_qual(torch, self.name) + + if self.method_variant is _NOTHING: + self.method_variant = getattr(torch.Tensor, self.name, None) + + # attributes like real, imag are not callable + if not callable(self.method_variant): + self.method_variant = None + + if self.inplace_variant is _NOTHING: + inplace_name = self.name + "_" + self.inplace_variant = getattr(torch.Tensor, inplace_name, None) + + if self.operator_variant is _NOTHING: + self.operator_variant = getattr(operator, self.name, None) + + if self.inplace_operator_variant is _NOTHING: + # Note: operator.i will use operator. and assign the result to the lhs when no + # __i__ method is found. This results in the appearance of an inplace operator variant which + # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace + # operator with a check that an inplace variant exists. 
+ if self.inplace_variant is not None: + inplace_operator_name = "i" + self.name + self.inplace_operator_variant = getattr( + operator, inplace_operator_name, None + ) + else: + self.inplace_operator_variant = None + + self.decorators = (*self.decorators, *self.skips) + + # Specifying sample inputs function without specifying the + # corresponding layout support implies the layout support: + if self.supports_sparse is None: + self.supports_sparse = self.sample_inputs_sparse_coo_func is not None + if self.sample_inputs_sparse_coo_func is None: + self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified + + if self.supports_sparse_csr is None: + self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None + if self.sample_inputs_sparse_csr_func is None: + self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified + + if self.supports_sparse_csc is None: + self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None + if self.sample_inputs_sparse_csc_func is None: + self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified + + if self.supports_sparse_bsr is None: + self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None + if self.sample_inputs_sparse_bsr_func is None: + self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified + + if self.supports_sparse_bsc is None: + self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None + if self.sample_inputs_sparse_bsc_func is None: + self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified + + if self.supports_njt is None: + self.supports_njt = False + + # We run the sampling functions without tracking the gradiends of the creation of inputs + self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func) + self.sample_inputs_sparse_coo_func = torch.no_grad()( + self.sample_inputs_sparse_coo_func + ) + self.sample_inputs_sparse_csr_func = torch.no_grad()( + self.sample_inputs_sparse_csr_func + ) + self.sample_inputs_sparse_csc_func = torch.no_grad()( + self.sample_inputs_sparse_csc_func + ) + self.sample_inputs_sparse_bsr_func = torch.no_grad()( + self.sample_inputs_sparse_bsr_func + ) + self.sample_inputs_sparse_bsc_func = torch.no_grad()( + self.sample_inputs_sparse_bsc_func + ) + if self.reference_inputs_func is not None: + self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func) + + if not self.autodiff_fusible_nodes: + self.autodiff_fusible_nodes = [] + + if self.autodiff_nonfusible_nodes is None: + self.autodiff_nonfusible_nodes = ["aten::" + self.name] + + # Autograd support + + # Autograd flags that depend on backward AD only + # - If setting has been explicitly set, raise error if inconsistent + if self.supports_gradgrad is None: + self.supports_gradgrad = self.supports_autograd + else: + assert not (self.supports_gradgrad and not self.supports_autograd), ( + "supports_gradgrad refines the part of autograd is supported, so it should " + "not be set if supports_autograd is False" + ) + if self.check_batched_grad is None: + self.check_batched_grad = self.supports_autograd or self.supports_forward_ad + else: + assert not ( + self.check_batched_grad + and not (self.supports_autograd or self.supports_forward_ad) + ), ( + "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so " + "it should not be set if supports_autograd is False" + ) + if self.check_batched_gradgrad is None: + self.check_batched_gradgrad = self.supports_gradgrad + else: + assert not (self.check_batched_gradgrad 
and not self.supports_gradgrad), ( + "check_batched_gradgrad refines the part of autograd that will be checked (by " + "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd " + "is False." + ) + if self.check_batched_forward_grad is None: + self.check_batched_forward_grad = self.supports_forward_ad + else: + assert not ( + self.check_batched_forward_grad and not self.supports_forward_ad + ), ( + "check_batched_forward_grad should only be used when supports_forward_ad " + "is True. It is used to disable the test in the specific cases " + "where the op supports forward ad but fails to compute " + "batched forward grad." + ) + + if self.check_inplace_batched_forward_grad is None: + self.check_inplace_batched_forward_grad = self.check_batched_forward_grad + else: + assert not ( + self.check_inplace_batched_forward_grad + and not self.check_batched_forward_grad + ), ( + "check_batched_forward_grad should only be used when check_batched_forward_grad " + "is True. It is used to disable the test in the specific cases " + "where the op supports batched forward grad but fails to compute batched forward " + "grad for the inplace variant of the op." + ) + + assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), ( + "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be " + "True if backward ad is also checked, i.e., supports_forward_ad should be True.", + self.name, + ) + + # Autograd flags that depend on both forward AD and backward AD + if self.supports_inplace_autograd is None: + self.supports_inplace_autograd = ( + self.supports_autograd or self.supports_forward_ad + ) + else: + assert not ( + self.supports_inplace_autograd + and not self.supports_autograd + and not self.supports_forward_ad + ), ( + "supports_inplace_autograd refines the part of autograd that is supported, so " + "it should not be set if both supports_autograd and supports_forward_ad are False" + ) + + if self.aliases is not None: + self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment] + else: + self.aliases = () + + def __call__(self, *args, **kwargs): + """Calls the function variant of the operator.""" + return self.op(*args, **kwargs) + + def __str__(self): + return dataclass_repr(self) + + def get_op(self): + """Returns the function variant of the operator, torch..""" + return self.op + + def get_method(self): + """Returns the method variant of the operator, torch.Tensor.. + Returns None if the operator has no method variant. + """ + return self.method_variant + + def get_inplace(self): + """Returns the inplace variant of the operator, torch.Tensor._. + Returns None if the operator has no inplace variant. + """ + return self.inplace_variant + + def get_operator(self): + """Returns operator variant of the operator, e.g. operator.neg + Returns None if the operator has no operator variant. + """ + return self.operator_variant + + def get_inplace_operator(self): + """Returns the inplace operator variant of the operator, e.g operator.iadd + Returns None if the operator has no inplace operator variant""" + return self.inplace_operator_variant + + def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs but with the tensor input or first + tensor in a sequence input conjugated. 
+ """ + + set_seed = kwargs.pop("set_seed", True) + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + conj_samples = list(samples) + + def conjugate(tensor): + _requires_grad = tensor.requires_grad + tensor = tensor.conj() + return tensor.requires_grad_(_requires_grad) + + for i, sample in enumerate(samples): + sample = conj_samples[i] + # Note: it is assumed that the input here is either a tensor or tensorlist + if isinstance(sample.input, torch.Tensor): + sample.input = conjugate(sample.input) + else: + sample.input[0] = conjugate(sample.input[0]) + + return TrackedInputIter( + iter(conj_samples), + "conjugate sample input", + set_seed=set_seed, + restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, + ) + + def sample_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + These samples should be sufficient to test the function works correctly + with autograd, TorchScript, etc. + """ + set_seed = kwargs.pop("set_seed", True) + samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs) + + if kwargs.get("include_conjugated_inputs", False): + conj_samples = self.conjugate_sample_inputs( + device, dtype, requires_grad, **kwargs + ) + samples_list = list(samples) + samples_list.extend(conj_samples) + samples = tuple(samples_list) + + return TrackedInputIter( + iter(samples), + "sample input", + set_seed=set_seed, + restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, + ) + + def reference_inputs(self, device, dtype, requires_grad=False, **kwargs): + """ + Returns an iterable of SampleInputs. + + Distinct from sample_inputs() above because this returns an expanded set + of inputs when reference_inputs_func is defined. If undefined this returns + the sample inputs. + """ + set_seed = kwargs.pop("set_seed", True) + if self.reference_inputs_func is None: + samples = self.sample_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter( + iter(samples), + "reference input", + set_seed=set_seed, + restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, + ) + + if kwargs.get("include_conjugated_inputs", False): + raise NotImplementedError + + references = self.reference_inputs_func( + self, device, dtype, requires_grad, **kwargs + ) + return TrackedInputIter( + iter(references), + "reference input", + set_seed=set_seed, + restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, + ) + + def error_inputs(self, device, **kwargs): + """ + Returns an iterable of ErrorInputs. + """ + set_seed = kwargs.pop("set_seed", True) + errs = self.error_inputs_func(self, device, **kwargs) + return TrackedInputIter( + iter(errs), + "error input", + callback=lambda e: e.sample_input, + set_seed=set_seed, + restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX, + ) + + def error_inputs_sparse(self, device, layout, **kwargs): + """ + Returns an iterable of ErrorInputs that contain sparse sample + inputs with a specified layout. 
+ """ + if not self.supports_sparse_layout(layout): + raise unittest.SkipTest("unsupported sparse layout") + return self.error_inputs_sparse_func(self, device, layout, **kwargs) + + def supports_sparse_layout(self, layout): + """Return True if OpInfo supports the specified sparse layout.""" + layout_name = str(layout).split(".")[-1] + # map torch.sparse_coo to OpInfo.supports_sparse: + layout_name = layout_name.replace("_coo", "") + return getattr(self, f"supports_{layout_name}") + + def sample_inputs_sparse( + self, layout, device, dtype, requires_grad=False, **kwargs + ): + """Returns an iterable of SampleInputs that contain inputs with a + specified sparse layout. + """ + layout_name = str(layout).split(".")[-1] + sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name) + + def non_empty_sampler(op, generator): + found_sample = False + for sample in generator: + found_sample = True + yield sample + if not found_sample: + raise unittest.SkipTest("NO SAMPLES!") + + return non_empty_sampler( + self, + sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs), + ) + + def _sample_inputs_unspecified(self, *args, **kwargs): + """Raises an NotImplemented exception in a OpInfo instance creation + that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True + without specifying the corresponding sample function as + sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func. + + To avoid this, either define the corresponding sample function, + or re-map unsupported samples to error inputs in an appropiate + + opinfo/definitions/sparse.py:_validate_sample_input_sparse_ + + function. + """ + raise NotImplementedError("no sample function specified") + + def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + coo layout. + """ + return self.sample_inputs_sparse_coo_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csr layout. + """ + return self.sample_inputs_sparse_csr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + csc layout. + """ + return self.sample_inputs_sparse_csc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsr layout. + """ + return self.sample_inputs_sparse_bsr_func( + self, device, dtype, requires_grad, **kwargs + ) + + def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs): + """Returns an iterable of SampleInputs that contain inputs with sparse + bsc layout. 
+ """ + return self.sample_inputs_sparse_bsc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + """Returns the decorators targeting the given test.""" + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + if device_type == "cuda": + return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA + if device_type == "xpu": + return self.dtypesIfXPU + if device_type == "hpu": + return self.dtypesIfHpu + return self.dtypes + + def supported_backward_dtypes(self, device_type): + if not self.supports_autograd: + return set() + + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + backward_dtypes = None + if device_type == "cuda": + backward_dtypes = ( + self.backward_dtypesIfROCM + if TEST_WITH_ROCM + else self.backward_dtypesIfCUDA + ) + elif device_type == "hpu": + backward_dtype = self.backward_dtypesIfHpu + else: + backward_dtypes = self.backward_dtypes + + allowed_backward_dtypes = floating_and_complex_types_and( + torch.bfloat16, torch.float16, torch.complex32 + ) + return set(allowed_backward_dtypes).intersection(backward_dtypes) + + def supports_dtype(self, dtype, device_type) -> bool: + return dtype in self.supported_dtypes(device_type) + + @property + def full_name(self): + """Returns a full name that helps to uniquely identify this OpInfo.""" + variant = "." + self.variant_test_name if self.variant_test_name else "" + # example: "normal.in_place" where "normal" is the name and "in_place" is the variant + return f"{self.name}{variant}" + + @property + def formatted_name(self): + """Returns a formatted full name for this OpInfo that can be used in test names.""" + return self.full_name.replace(".", "_") + + +def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs): + """Generates input tensors for testing reduction operators""" + yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor( + [3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad + ) + + +def _generate_reduction_kwargs(ndim, supports_multiple_dims=True): + """Generates a subset of all valid dim and keepdim kwargs given ndim that + is appropriate for testing reduction operators. 
+ """ + + # Test default dim and keepdim + yield {} + + # Test reducing inner and outer most dimensions + yield {"dim": 0, "keepdim": True} + yield {"dim": -1, "keepdim": False} + + # Test reducing middle dimension + if ndim > 2: + yield {"dim": ndim // 2, "keepdim": True} + + if supports_multiple_dims: + # Test reducing all dimensions + yield {"dim": tuple(range(ndim)), "keepdim": False} + + # Test reducing both first and last dimensions + if ndim > 1: + yield {"dim": (0, -1), "keepdim": True} + + # Test reducing every other dimension starting with the second + if ndim > 3: + yield {"dim": tuple(range(1, ndim, 2)), "keepdim": False} + + +def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for reduction operators.""" + + # TODO(@heitorschueroff) Once all reduction operators are using + # ReductionOpInfo use op_info.supports_multiple_dims directly. + supports_multiple_dims: bool = kwargs.get("supports_multiple_dims", True) + + # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo + # use op_info.generate_args_kwargs directly. + generate_args_kwargs = kwargs.get( + "generate_args_kwargs", lambda *args, **kwargs: (yield (), {}) + ) + + for t in _generate_reduction_inputs(device, dtype, requires_grad): + for reduction_kwargs in _generate_reduction_kwargs( + t.ndim, supports_multiple_dims + ): + for args, kwargs in generate_args_kwargs(t, **reduction_kwargs): + kwargs.update(reduction_kwargs) + yield SampleInput( + t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs + ) + + +# NOTE [Reductions]: +# +# For testing purposes, we relax the definition of a reduction operator +# as defined in the docstring below. We do this to capture operators with +# a similar API so they can be tested automatically. However... +# +# Strictly speaking a reduction operator is an operator that can reduce an +# array to a single scalar value and that can be computed from the partial +# result of reducing subarrays. This usually means that the reduction operation +# should be commutative and associative. This definition is important when it +# comes to implementation as it determines how a reduction can be parallelized. +# +# For example, many summary statistics such as median, mode and quantile cannot +# be computed from partial results because these are sorting and counting based +# algorithms that need information that would be lost in the reduced value. +class ReductionOpInfo(OpInfo): + """Reduction operator information. + + An operator is a reduction operator if it reduces one or more dimensions of + the input tensor to a single value. Reduction operators must implement the + following signature: + + - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor` + + ReductionOpInfo tests that reduction operators implement a consistent API. + Optional features such as reducing over multiple dimensions are captured in + the optional keyword parameters of the ReductionOpInfo constructor. + + If a reduction operator does not yet implement the full required API of + reduction operators, this should be documented by xfailing the failing + tests rather than adding optional parameters to ReductionOpInfo. + + NOTE + The API for reduction operators has not yet been finalized and some + requirements may change. + + See tests in test/test_reductions.py + """ + + def __init__( + self, + name, + *, + # The identity value for the operator if it has one. 
+ identity: Optional[Any] = None, + # The nan policy for the operator if it implements one. + # - propagate: NaN values are propagated to the output + # - omit: NaN values are discarded during the reduction + nan_policy: Optional[str] = None, + # Whether the operator supports reducing multiple dimensions. + supports_multiple_dims: bool = True, + # Whether the operator promotes integral to floating point dtypes. + promotes_int_to_float: bool = False, + # Whether the operator promotes all integral dtypes to int64. + promotes_int_to_int64: bool = False, + # If a specific dtype is given, then the operator always returns that + # dtype irrespective of the input dtype. If None, the operator returns + # the dtype according to the type promotion rules above. + result_dtype: Optional[torch.dtype] = None, + # Casts complex results to real (e.g. linalg.norm or torch.var) + complex_to_real: bool = False, + # ReductionOpInfo tests generate their own input, dim and keepdim + # arguments and call this function to generate tuples of extra args and + # kwargs to use when calling the op. This is required for operators that + # have other required parameters besides the input tensor. + generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: ( + yield (), + {}, + ), + # Options from the OpInfo base class + **kwargs, + ): + self._original_reduction_args = locals().copy() + assert nan_policy in (None, "propagate", "omit") + + # These are mutually exclusive options + assert not (result_dtype and promotes_int_to_float) + assert not (result_dtype and promotes_int_to_int64) + assert not (result_dtype and complex_to_real) + assert not (promotes_int_to_float and promotes_int_to_int64) + + # Default sample_inputs_func for ReductionOpInfo which augments sample + # inputs from sample_inputs_reduction with the args and kwargs from + # generate_args_kwargs. This is only used if sample_inputs_func is None. 
+ def sample_inputs_func(*args, **kwargs): + kwargs["supports_multiple_dims"] = supports_multiple_dims + kwargs["generate_args_kwargs"] = generate_args_kwargs + yield from sample_inputs_reduction(*args, **kwargs) + + # Override OpInfo defaults and call base class __init__ + kwargs.setdefault("inplace_variant", None) + kwargs.setdefault("sample_inputs_func", sample_inputs_func) + super().__init__(name, promotes_int_to_float=promotes_int_to_float, **kwargs) + + self.identity = identity + self.nan_policy = nan_policy + self.supports_multiple_dims = supports_multiple_dims + self.promotes_int_to_int64 = promotes_int_to_int64 + self.complex_to_real = complex_to_real + self.result_dtype = result_dtype + self.generate_args_kwargs = generate_args_kwargs + + +# The base reference input generation for elementwise binary operations +def _reference_inputs_elementwise_binary( + op, device, dtype, requires_grad, exclude_zero, **kwargs +): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + yield from generate_elementwise_binary_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + if dtype is not torch.bool: + yield from generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + if dtype not in (torch.bool, torch.uint8, torch.int8): + yield from generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + yield from generate_elementwise_binary_broadcasting_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield from generate_elementwise_binary_with_scalar_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + if dtype.is_floating_point or dtype.is_complex: + yield from generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + +# Note that these references inputs use scalars for the SampleInput.input value, +# and many tests require SampleInput.input be a tensor or a list of tensors +def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + gen = partial( + _reference_inputs_elementwise_binary, + op, + device, + dtype, + requires_grad, + exclude_zero, + **kwargs, + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_binary_noncontiguous_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + yield from generate_elementwise_binary_arbitrarily_strided_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + +# A functional that extends an elementwise binary operator's bespoke error inputs +# with generic error inputs for the class of elementwise binary operations +def make_error_inputs_elementwise_binary(error_inputs_func): + def error_inputs_func_wrapper(op, device, **kwargs): + if error_inputs_func is not None: + yield from error_inputs_func(op, device, **kwargs) + + if not op.supports_rhs_python_scalar: + si = 
SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if not op.supports_one_python_scalar: + si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if ( + not kwargs.get("skip_two_python_scalars", False) + and not op.supports_two_python_scalars + ): + si = SampleInput(2, args=(3,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + return error_inputs_func_wrapper + + +# The following functions and classes are for testing elementwise binary operators. + + +# Returns a generator of pairs of contiguous tensors on the requested device +# and with the requested dtype. +# +# This function is intended to test the non-vectorized and vectorized code +# paths of elementwise binary functions, as well as their handling of odd tensor +# sizes (like zero-dim tensors and tensors with zero elements). +# +# Each iterable will include an a tensor with no elements, +# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and +# a large 2D tensor. +def generate_elementwise_binary_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + # tensors with no elements + (0,), + (1, 0, 3), + # zero dim (scalar) tensor + (), + # small 1D tensor + (20,), + # medium 1D tensor + (812,), + # large 2D tensor + (1029, 917), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + b = make_arg(shape) + yield SampleInput(a, args=(b,)) + + +# Returns a generator of pairs of contiguous tensors on the requested device and with +# the requested dtype. +# +# Unlike the previous function, the values in these tensors are specified manually. 
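The arbitrarily-strided generator above views a flat 500-element buffer through explicit (shape, strides, offset) triples. A quick sketch of what one of those cases produces, assuming only the documented behavior of Tensor.as_strided:

import torch

base = torch.arange(500, dtype=torch.float32)
view = base.as_strided((5, 6, 2), (1, 1, 7), 2)  # size, stride, storage_offset
print(view.shape, view.stride(), view.storage_offset())  # torch.Size([5, 6, 2]) (1, 1, 7) 2
print(view.is_contiguous())  # False: an overlapping, non-row-major layout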
+def generate_elementwise_binary_small_value_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=None +): + if exclude_zero is None: + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + # defines interesting values + _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) + _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) + _float_vals = ( + 0.0, + -0.0, + -0.001, + 0.001, + -0.25, + 0.25, + -1.0, + 1.0, + -math.pi / 2, + math.pi / 2, + -math.pi + 0.00001, + math.pi - 0.00001, + -math.pi, + math.pi, + -math.pi - 0.00001, + math.pi + 0.00001, + ) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_vals, _float_vals) + elif dtype.is_complex: + complex_vals = product(_float_vals, _float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): + prod = product(_int_vals, _int_vals) + elif dtype is torch.uint8: + prod = product(_unsigned_int_vals, _unsigned_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + if r == 0 and exclude_zero: + r_vals.append(1) + else: + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + _large_int_vals = (-1113, 1113, -10701, 10701) + _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) + _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) + + l_vals = [] + r_vals = [] + + if dtype == torch.float16: + prod = product(_large_float16_vals, _large_float16_vals) + elif dtype.is_floating_point: + prod = product(_large_float_vals, _large_float_vals) + elif dtype.is_complex: + complex_vals = product(_large_float_vals, _large_float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int16, torch.int32, torch.int64): + prod = product(_large_int_vals, _large_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + _float_extremals = (float("inf"), float("-inf"), float("nan")) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_extremals, _float_extremals) + elif dtype.is_complex: + complex_vals = product(_float_extremals, _float_extremals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, 
complex_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + # Test case for NaN propagation + nan = ( + float("nan") if dtype.is_floating_point else complex(float("nan"), float("nan")) + ) + lhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + lhs.view(-1)[::3] = nan + rhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + rhs.view(-1)[::3] = nan + + yield SampleInput(lhs, args=(rhs,)) + + +# Returns a generator of pairs of contiguous and noncontiguous tensors that +# require broadcasting +def generate_elementwise_binary_broadcasting_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + ((1,), ()), + ((2,), ()), + ((1,), (2,)), + ((2, 1), (2,)), + ((1, 2), (2,)), + ((3, 2), (2,)), + ((1, 3, 2), (2,)), + ((1, 3, 2), (3, 2)), + ((3, 1, 2), (3, 2)), + ((2, 3, 2), ()), + ((3, 1, 2), (1, 3, 2)), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, noncontiguous in product(shapes, [True, False]): + shape_lhs, shape_rhs = shape + lhs = make_arg( + shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs + ) + rhs = make_arg( + shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs + ) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=True) + + +# Returns a generator of pairs of contiguous tensors and scalars +def generate_elementwise_binary_with_scalar_samples( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5)) + if op.supports_rhs_python_scalar: + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs, args=(rhs_scalar,)) + + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(lhs_scalar, args=(rhs,)) + + if op.supports_two_python_scalars: + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs_scalar, args=(rhs_scalar,)) + + +# Returns a generator of pairs of contiguous tensors and 0d tensors and scalars and type promotion +def generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, *, device, dtype, requires_grad=False +): + # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars + if op.name in ( + "eq", + "ne", + "gt", + "ge", + "lt", + "le", + "logical_and", + "logical_or", + "logical_xor", + ): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + shape = ( + 23, + ) # this shape is big enough to trigger vectorization, and has non-vectorized tail + values = (float("nan"), float("inf"), -float("inf")) + scalar_tensors = tuple(torch.tensor(val) for val in values) + if op.supports_rhs_python_scalar: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = 
make_arg(shape, **op.rhs_make_tensor_kwargs) + for scalar in values + scalar_tensors: + yield SampleInput(lhs, args=(scalar,)) + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(scalar, args=(rhs,)) + + +# Returns a generator of pairs of noncontiguous tensors +def generate_elementwise_binary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + # Generic noncontiguity + lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs) + rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.clone(), args=(rhs.clone(),)) + yield SampleInput(lhs.contiguous(), args=(rhs,)) + + # Transposed + lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs) + rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.T, args=(rhs.T,)) + + # More noncontiguity + shapes = ((5, 7), (1024,)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + lhs_non_contig.copy_(lhs) + + rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + rhs_non_contig.copy_(rhs) + + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Noncontiguous indices + shape = (2, 2, 1, 2) + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs[:, 1, ...] + rhs_non_contig = rhs[:, 1, ...] 
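For context on the "noncontiguous indices" case just above: slicing out a middle dimension produces a view that shares storage but is no longer contiguous, which is exactly the layout this generator wants to exercise. A small stand-alone sketch:

import torch

t = torch.arange(8.0).reshape(2, 2, 1, 2)
v = t[:, 1, ...]          # shape (2, 1, 2), skipping half of the underlying storage
print(v.is_contiguous())  # False
print(v.stride())         # (4, 2, 1)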
+ + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs.expand(3, -1, -1) + rhs_non_contig = rhs.expand(3, -1, -1) + + yield SampleInput(lhs_non_contig, args=(rhs_non_contig,)) + + +# Sample inputs for elementwise binary operators, like add +def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + _M = S if kwargs.get("small_inputs_only", False) else M + _S = XS if kwargs.get("small_inputs_only", False) else S + + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + shapes = ( + ((), ()), + ((_S,), ()), + ((_S, 1), (_S,)), + ((_M, _S), ()), + ((_S, _M, _S), (_M, _S)), + ((_S, _M, _S), (_S, _M, _S)), + ((_M, 1, _S), (_M, _S)), + ((_M, 1, _S), (1, _M, _S)), + ((0, 1, XS), (0, _M, XS)), + ) + + sample_kwargs = kwargs.get("sample_kwargs", {}) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs) + broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) + + yield SampleInput( + lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input + ) + + +# Metadata class for binary "universal functions (ufuncs)" that accept two +# tensor and have common properties +class BinaryUfuncInfo(OpInfo): + """Operator information for 'universal binary functions (binary ufuncs).' + These are functions of two tensors with common properties like: + - they are elementwise functions + - the output shape is determined by the input shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/stable/reference/ufuncs.html) for more details + about the concept of ufuncs. + """ + + def __init__( + self, + name, + *, + sample_inputs_func=sample_inputs_elementwise_binary, + reference_inputs_func=reference_inputs_elementwise_binary, + error_inputs_func=None, + lhs_make_tensor_kwargs=None, + rhs_make_tensor_kwargs=None, + always_returns_bool=False, # Set to true if the op always returns bool tensors + supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs + supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs + supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs + **kwargs, + ): + self._original_binary_ufunc_args = locals().copy() + + # Elementwise binary operations perform the equivalent of test_numpy_refs + # in test_binary_ufuncs, but with additional test granularity. So the + # generic test_ops.py test is skipped because it's redundant. 
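For reference, the broadcasts_input flag computed in sample_inputs_elementwise_binary above records whether broadcasting would change the lhs shape, which matters because inplace variants cannot broadcast their self argument. A quick check of that rule, relying only on the documented behavior of torch.broadcast_shapes:

import torch

shape_lhs, shape_rhs = (5, 1, 5), (5, 5)
broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
print(torch.broadcast_shapes(shape_lhs, shape_rhs))  # torch.Size([5, 5, 5])
print(broadcasts_input)                              # True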
+ common_skips = ( + DecorateInfo( + unittest.skip("Skipping redundant test."), + "TestCommon", + "test_numpy_refs", + ), + ) + kwargs["skips"] = kwargs.get("skips", ()) + common_skips + super().__init__( + name, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), + **kwargs, + ) + + # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. + if lhs_make_tensor_kwargs is None: + lhs_make_tensor_kwargs = {} + self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs + + if rhs_make_tensor_kwargs is None: + rhs_make_tensor_kwargs = {} + self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs + + self.always_returns_bool = always_returns_bool + self.supports_rhs_python_scalar = supports_rhs_python_scalar + self.supports_one_python_scalar = supports_one_python_scalar + self.supports_two_python_scalars = supports_two_python_scalars + + if self.supports_two_python_scalars: + self.supports_one_python_scalar = True + + if self.supports_one_python_scalar: + assert ( + supports_rhs_python_scalar + ), "Can't support lhs and rhs Python scalars but not rhs scalars!" + + +# The following functions and classes are for testing elementwise unary operators. +def sample_inputs_elementwise_unary( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + _L = S if kwargs.get("small_inputs_only", False) else L + + low, high = op_info.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op_info._domain_eps + high = high if high is None or not is_floating else high - op_info._domain_eps + if ( + op_info.supports_sparse_csr + or op_info.supports_sparse_csc + or op_info.supports_sparse_bsr + or op_info.supports_sparse_bsc + ): + # Tensors with dim=2 for sparse compressed testing + yield SampleInput( + make_tensor( + (_L, _L), + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + else: + # Creates a 1D, empty, and scalar tensor + for shape in ((_L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=low, + high=high, + requires_grad=requires_grad, + ), + kwargs=op_kwargs, + ) + + +# Replace values satisfying condition with a safe value. 
This is used to block +# out values the could cause singularity like tan(pi/2) +def _replace_values_in_tensor(tensor, condition, safe_value): + mask = condition(tensor) + tensor.masked_fill_(mask, safe_value) + + +# Helper to create a unary elementwise tensor with valid inputs +def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs): + low, high = op.domain + is_floating = dtype.is_floating_point or dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs) + + if op.reference_numerics_filter is not None and dtype is not torch.bool: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + return a + + +# Restricts the values in the tensor to the domain of the +# given elementwise unary operator +def _filter_unary_elementwise_tensor(a, *, op): + # short-circuits for boolean tensors + if a.dtype is torch.bool: + return a + + low, high = op.domain + is_floating = a.dtype.is_floating_point or a.dtype.is_complex + low = low if low is None or not is_floating else low + op._domain_eps + high = high if high is None or not is_floating else high - op._domain_eps + + if a.dtype is torch.uint8 and low is not None: + low = max(low, 0) + + if not a.dtype.is_floating_point and not a.dtype.is_complex: + low = math.ceil(low) if low is not None else None + high = math.floor(high) if high is not None else None + + if op.reference_numerics_filter is not None: + condition, safe_value = op.reference_numerics_filter + _replace_values_in_tensor(a, condition, safe_value) + + if low is not None or high is not None: + if a.dtype.is_complex: + a.real.clamp_(low, high) + a.imag.clamp_(low, high) + else: + a.clamp_(min=low, max=high) + + return a + + +def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs): + # Special-cases bool + if dtype is torch.bool: + tensors = ( + torch.empty(0, device=device, dtype=torch.bool), + torch.tensor(True, device=device), + torch.tensor(False, device=device), + torch.tensor((True, False), device=device), + make_tensor((812,), device=device, dtype=dtype), + make_tensor((1029, 917), device=device, dtype=dtype), + ) + for a in tensors: + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + shapes = ( + (1029, 917), + (812,), + # Empty sizes + (0,), + (0, 3, 3), + (1, 0, 5), + (6, 0, 0, 0), + (3, 0, 1, 0), + ) + + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + for shape in shapes: + a = make_arg(shape) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_small_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def 
generate_elementwise_unary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + yield SampleInput( + sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] + ) + + +def generate_elementwise_unary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + + # Generic noncontiguity + t = make_arg((1026,), noncontiguous=True) + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Transposed + t = make_arg((1024, 1024)).T + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + t = make_arg(shape) + t_non_contig = t.expand(3, -1, -1) + yield SampleInput( + t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] + ) + + +def generate_elementwise_unary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +# Reuses the elementwise binary generators for consistency +# TODO: in the future generalize the reference generators to handle n-ary elementwise operations +def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + yield from generate_elementwise_unary_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype is not torch.bool: + yield from generate_elementwise_unary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + if dtype not in (torch.bool, torch.uint8, torch.int8) and ( + op.handles_large_floats + or (not dtype.is_floating_point and not dtype.is_complex) + ): + yield from generate_elementwise_unary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype.is_floating_point or ( + op.handles_complex_extremal_values and dtype.is_complex + ): + yield from generate_elementwise_unary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + gen = partial( + _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_unary_noncontiguous_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield from generate_elementwise_unary_arbitrarily_strided_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +# Metadata class for unary "universal functions (ufuncs)" that accept a single +# tensor and 
have common properties like: +class UnaryUfuncInfo(OpInfo): + """Operator information for 'universal unary functions (unary ufuncs).' + These are functions of a single tensor with common properties like: + - they are elementwise functions + - the input shape is the output shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details + about the concept of ufuncs. + """ + + def __init__( + self, + name, # the string name of the function + *, + dtypes=floating_types(), + domain=(None, None), # the [low, high) domain of the function + handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) + handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) + supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle + sample_inputs_func=sample_inputs_elementwise_unary, + reference_inputs_func=reference_inputs_elementwise_unary, + sample_kwargs=lambda device, dtype, input: ({}, {}), + reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested + **kwargs, + ): + self._original_unary_ufunc_args = locals().copy() + + super().__init__( + name, + dtypes=dtypes, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + **kwargs, + ) + self.domain = domain + self.handles_complex_extremal_values = handles_complex_extremal_values + self.handles_large_floats = handles_large_floats + self.supports_complex_to_float = supports_complex_to_float + self.reference_numerics_filter = reference_numerics_filter + + # test_unary_ufuncs.py generates its own inputs to test the consistency + # of the operator on sliced tensors, non-contig tensors, etc. + # `sample_kwargs` is a utility function to provide kwargs + # along with those inputs if required (eg. clamp). + # It should return two dictionaries, first holding kwarg for + # torch operator and second one for reference NumPy operator. + self.sample_kwargs = sample_kwargs + + # Epsilon to ensure grad and gradgrad checks don't test values + # outside a function's domain. 
+ self._domain_eps = 1e-5 + + +def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs): + is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half + if not is_fp16_or_chalf: + nd_tensor = partial( + make_tensor, + (S, S + 1, S + 2), + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, (31,), device=device, dtype=dtype, requires_grad=requires_grad + ) + else: + # cuFFT supports powers of 2 for half and complex half precision + # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args + # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two + low = None + high = None + if self.name in ["fft.hfft", "fft.irfft", "_refs.fft.hfft", "_refs.fft.irfft"]: + shapes = ((2, 9, 9), (33,)) + elif self.name in [ + "fft.hfft2", + "fft.irfft2", + "_refs.fft.hfft2", + "_refs.fft.irfft2", + ]: + shapes = ((2, 8, 9), (33,)) + elif self.name in [ + "fft.hfftn", + "fft.irfftn", + "_refs.fft.hfftn", + "_refs.fft.irfftn", + ]: + shapes = ((2, 2, 33), (33,)) + # Adjusting the limits because the test would be flaky due to over-saturation of float16 + # See: https://github.com/pytorch/pytorch/pull/81416 + low = -1.0 + high = 1.0 + else: + shapes = ((2, 8, 16), (32,)) + nd_tensor = partial( + make_tensor, + shapes[0], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, + shapes[1], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + + if self.ndimensional == SpectralFuncType.ND: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(8,)) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3, (0, -1)]) + elif self.ndimensional == SpectralFuncType.TwoD: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(6, 8) if not is_fp16_or_chalf else (4, 8)) + yield SampleInput(nd_tensor(), dim=0) + yield SampleInput(nd_tensor(), dim=(0, -1)) + yield SampleInput(nd_tensor(), dim=(-3, -2, -1)) + else: + yield SampleInput( + nd_tensor(), + n=10 if not is_fp16_or_chalf else 8, + dim=1, + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), n=7 if not is_fp16_or_chalf else 8) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3]) + + +SpectralFuncType = Enum("SpectralFuncType", ("OneD", "TwoD", "ND")) + + +# Metadata class for Fast Fourier Transforms in torch.fft. 
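A short arithmetic note on the half/complex-half shapes chosen above: with default arguments, hfft and irfft produce an output of size 2 * (last_dim - 1), so last dimensions of 9 and 33 keep the logical FFT size at a power of two, matching the cuFFT restriction mentioned in the comment.

for last_dim in (9, 33):
    print(last_dim, 2 * (last_dim - 1))  # 9 -> 16, 33 -> 64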
+class SpectralFuncInfo(OpInfo): + """Operator information for torch.fft transforms.""" + + def __init__( + self, + name, # the string name of the function + *, + ref=None, # Reference implementation (probably in np.fft namespace) + dtypes=floating_and_complex_types(), + ndimensional: SpectralFuncType, + sample_inputs_func=sample_inputs_spectral_ops, + decorators=None, + **kwargs, + ): + self._original_spectral_func_args = dict(locals()).copy() + self._original_spectral_func_args.update(kwargs) + + decorators = list(decorators) if decorators is not None else [] + decorators += [ + skipCPUIfNoFFT, + DecorateInfo( + toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}), + "TestCommon", + "test_complex_half_reference_testing", + ), + ] + + super().__init__( + name=name, + dtypes=dtypes, + decorators=decorators, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + self.ndimensional = ndimensional + + +class ShapeFuncInfo(OpInfo): + """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll""" + + def __init__( + self, + name, # the string name of the function + *, + ref, # a reference function + dtypes=floating_types(), + dtypesIfCUDA=None, + dtypesIfROCM=None, + dtypesIfXPU=None, + sample_inputs_func=None, + **kwargs, + ): + super().__init__( + name, + dtypes=dtypes, + dtypesIfCUDA=dtypesIfCUDA, + dtypesIfROCM=dtypesIfROCM, + dtypesIfXPU=dtypesIfXPU, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + + +def sample_inputs_foreach( + self, + device, + dtype, + N, + *, + noncontiguous=False, + same_size=False, + low=None, + high=None, + zero_size: bool, + requires_grad: bool, + # mutually exclusive from same_size and zero_size, which are all or nothing + intersperse_empty_tensors: bool = False, +): + if zero_size: + return [torch.empty(0, dtype=dtype, device=device) for _ in range(N)] + if same_size: + return [ + make_tensor( + (N, N), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for _ in range(N) + ] + else: + # interweave some empty tensors + have the last 2 tensors be empty (see #100701) + return [ + torch.empty(0, dtype=dtype, device=device, requires_grad=requires_grad) + if (i % 3 == 0 or i >= N - 2) and intersperse_empty_tensors + else make_tensor( + (N - i, N - i), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for i in range(N) + ] + + +def get_foreach_method_names(name): + # get torch inplace reference function + op_name = "_foreach_" + name + inplace_op_name = op_name + "_" + + op = getattr(torch, op_name, None) + inplace_op = getattr(torch, inplace_op_name, None) + + ref = getattr(torch, name, None) + ref_inplace = getattr(torch.Tensor, name + "_", None) + return op, inplace_op, ref, ref_inplace + + +@dataclass +class ForeachFuncInfo(OpInfo): + """Early version of a specialized OpInfo for foreach functions + + The main differences from the parent class are (a) `dtypes`, `dtypesIfCUDA`, and `dtypesIfROCM` + are set to `get_all_dtypes(include_qint=False)`, and (b) the following arguments. + + ``supports_alpha_param=True`` means that the function supports a python scalar (``numbers.Number``) + as the last keyword argument such as `_foreach_add`. + ``supports_scalar_self_arg=True`` means that the function can take a python scalar as its first argument. + Currently only `_foreach_pow` supports this. 
+ ``backward_requires_result=True``, which could sound self-explanatory, means that the function uses + the forward result for its backward computation. + """ + + supports_alpha_param: bool = False + supports_scalar_self_arg: bool = False + backward_requires_result: bool = False + + def __post_init__(self): + ( + foreach_method, + foreach_method_inplace, + torch_ref_method, + torch_ref_inplace, + ) = get_foreach_method_names(self.name) + if not self.supports_out: + # note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call + # `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero` + # is not defined at the moment. Thus to skip the qualification, set a similar torch + # function. + assert foreach_method is None + assert torch_ref_method is None + foreach_method = foreach_method_inplace + torch_ref_method = torch_ref_inplace + + self.dtypes = _dispatch_dtypes(get_all_dtypes(include_qint=False)) + + self.op = foreach_method + self.method_variant = foreach_method + self.ref = torch_ref_method + self.inplace_variant = foreach_method_inplace + self.ref_inplace = torch_ref_inplace + self.has_no_in_place = self.inplace_variant is None + + name = self.name + self.name = f"_foreach_{name}" + if name == "norm": + self.ref = torch.linalg.vector_norm + elif name == "minimum": + # because minimum ref does not support inplace or scalar + self.ref = torch.clamp_max + self.ref_inplace = torch.Tensor.clamp_max_ + elif name == "maximum": + # because maximum ref does not support inplace or scalar + self.ref = torch.clamp_min + self.ref_inplace = torch.Tensor.clamp_min_ + + # The following sets `dtypesIfCUDA` and `dtypesIfROCM` accordingly. + super().__post_init__() + + def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs): + if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"): + return [] + return self.sample_inputs_func.sample_zero_size_tensor_inputs( + self, device, dtype, requires_grad, **kwargs + ) + + +def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs): + """Gradcheck wrapper for functions that take Hermitian matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the Hermitian property of the input. + """ + return op(input + input.mH, *args, **kwargs) + + +def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs): + """Gradcheck wrapper for functions that take lower or upper triangular matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the triangular property of the input. + `idx` is used to specific which `args[idx]` is to be triangularized. + """ + triangular_arg = args[idx].triu() if upper else args[idx].tril() + return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs) + + +def gradcheck_wrapper_triangular_input_real_positive_diagonal( + op, *args, upper=False, idx=0, **kwargs +): + """Gradcheck wrapper for functions that take lower/upper triangular matrices + with real and positive diagonals, for example, cholesky-like operations. 
+ """ + arg = args[idx] + arg_diag = arg.diagonal(0, -2, -1) + arg_diag_embed = torch.diag_embed(arg_diag) + id_diag_tensor = torch.ones_like(arg_diag) + id_tensor = torch.diag_embed(id_diag_tensor) + # new_arg = arg - diag(arg) + I + new_arg = arg - arg_diag_embed + id_tensor + return gradcheck_wrapper_triangular_input( + op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs + ) + + +def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked operations. + + When mask is specified, replaces masked-out elements with zeros. + + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. + """ + output = op(input, *args, **kwargs) + mask = kwargs.get("mask") + if mask is not None: + output_mask = torch.masked._output_mask(op, input, *args, **kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked pointwise operations. Assumes that the result + will be masked iff both tensors are masked at a specific index + + When mask is specified, replaces masked-out elements with zeros. + + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. + """ + output = op(input, *args, **kwargs) + input_mask = kwargs.get("input_mask") + other_mask = kwargs.get("other_mask") + if input_mask is not None and other_mask is not None: + combined_mask = torch.logical_and(input_mask, other_mask) + new_kwargs = dict(mask=combined_mask, **kwargs) + output_mask = torch.masked._input_mask(input, *args, **new_kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def clone_sample(sample, **kwargs): + """ + Given a SampleInput, this function analyzes its input, args and kwargs, + and produces a copy with each non-Tensor entry being copied by reference, + and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)` + """ + + def clone_tensor(t): + if isinstance(t, torch.Tensor): + return t.detach().clone().requires_grad_(t.requires_grad) + else: + return t + + sample_kwargs = kwargs if kwargs else sample.kwargs + + return SampleInput( + clone_tensor(sample.input), + args=tuple(map(clone_tensor, sample.args)), + kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()}, + ) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4820a3eae23293c31ff45ed4260870171b532cd4 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py @@ -0,0 +1,28 @@ +# mypy: ignore-errors + +from typing import List + +from torch.testing._internal.opinfo.core import OpInfo +from torch.testing._internal.opinfo.definitions import ( + _masked, + fft, + linalg, + signal, + special, +) + + +# Operator database +op_db: List[OpInfo] = [ + *fft.op_db, + *linalg.op_db, + *signal.op_db, + *special.op_db, + *_masked.op_db, +] + +python_ref_db: List[OpInfo] = [ + *fft.python_ref_db, + *linalg.python_ref_db, + *special.python_ref_db, +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc 
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e598d8ffc3937423a7baf194f909c7a2da7bd04 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0189a39134dfb2d22a497a6b2d9b0563a5ebe9fe Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ace7f5b4e2530ad6b2cb29ec5b89a117047fb583 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a120172c2b967236024142a8cc3cb313bff2aef Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5528456ea1613ab047667af709c2dd0850df436e Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f41ca3ce5d4b6347072b76463b372aca327f0db3 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d0174bef76012c297d7a7d2f7e500e161462950 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc differ diff --git 
a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31befe5c0bc71e5deff976ec1898c074f39175c Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py new file mode 100644 index 0000000000000000000000000000000000000000..eda339ebfe68a60a08162a8a1171371fdce72e84 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py @@ -0,0 +1,1228 @@ +# mypy: ignore-errors + +import unittest +from collections.abc import Sequence +from functools import partial +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import tol, toleranceOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, + complex_types, + floating_and_complex_types_and, + floating_types_and, + integral_types, +) +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + M, + OpInfo, + ReductionOpInfo, + S, + sample_inputs_reduction, + SampleInput, +) +from torch.testing._internal.opinfo.utils import prod_numpy, reference_reduction_numpy + + +# Used for log_softmax, softmax, softmin +def sample_inputs_softmax_variant( + op_info, + device, + dtype, + requires_grad, + with_dtype=False, + use_zero_dimensions=True, + **kwargs, +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + cases = [ + ((S,), (0,)), + ((S, S), (0,)), + ((S, S), (1,)), + ((S, S), (-1,)), + ((S, M, S), (2,)), + *([((S, 0, 0), (-1,))] if use_zero_dimensions else []), + ] + kwargs = dict(dtype=torch.float64) if with_dtype else None + + # PyTorch on XLA throws an error when passed with dim argument for 0d tensor. + # See https://github.com/pytorch/xla/issues/3061 for more details. + if torch.device(device).type != "xla": + cases.append(((), (0,))) + + return ( + SampleInput(make_arg(shape), args=dim, kwargs=kwargs) for shape, dim in cases + ) + + +def _generate_masked_op_mask(input_shape, device, **kwargs): + make_arg = partial( + make_tensor, dtype=torch.bool, device=device, requires_grad=False + ) + yield None + yield make_arg(input_shape) + if len(input_shape) > 2: + # broadcast last mask dimension: + yield make_arg(input_shape[:-1] + (1,)) + # broadcast middle mask dimension: + yield make_arg(input_shape[:1] + (1,) + input_shape[2:]) + # broadcast first mask dimension: + yield make_arg((1,) + input_shape[1:]) + # mask.ndim < input.ndim + yield make_arg(input_shape[1:]) + # mask.ndim == 1 + yield make_arg(input_shape[-1:]) + # masks that require broadcasting of inputs (mask.ndim > + # input.ndim) will not be supported, however, we may + # reconsider this if there will be demand on this kind of + # degenerate cases. + + +def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked reduction operators. 
+ + Masked reduction operator is a reduction operator with trailing + mask optional argument. A mask is a bool tensor with the same + shape as input or a shape that is broadcastable to input shape. + """ + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_sparse_coo_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse coo layouts. + """ + if op_info.supports_sparse: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse()) + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in {"prod", "amax", "amin"}: + # FIXME: for now reductions with non-zero reduction identity and + # unspecified mask are not supported for sparse COO + # tensors, see torch.masked.prod implementation + # for details. + continue + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def sample_inputs_sparse_csr_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse csr layouts. + """ + if op_info.supports_sparse_csr: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if not ( + sample_input.input.ndim == 2 and sample_input.kwargs.get("keepdim") + ): + # - sparse CSR tensors are always 2-D tensors + # - masked reduction on CSR tensors are defined only if keepdim is True. + continue + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse_csr()) + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in ["prod", "amax", "amin", "mean"]: + # reductions with non-zero reduction identity and + # unspecified mask is not supported for sparse CSR + # tensors, see torch.masked.prod implementation + # for details. 
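To make the "trailing mask argument" contract described in the docstring above concrete, here is a small example of the kind of call these samples exercise, using only the torch.masked.sum entry point that this file already relies on:

import torch

t = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
mask = torch.tensor([[True, False], [True, True]])
# Masked-out elements are ignored by the reduction.
print(torch.masked.sum(t, 1, keepdim=True, mask=mask))  # tensor([[1.], [7.]])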
+ continue + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + yield new_sample + if sample_input.kwargs["dim"] == 0: + # Reductions of CSR tensors use different implementations for + # inner and/or outer dimensions. So, as a minimum of testing CSR + # implementations the following kwargs must be generated: + # dict(dim=0, keepdim=True) + # dict(dim=1, keepdim=True) + # dict(dim=(0, 1), keepdim=True) + # Here we generate the dim=1 case from the dim=0 case. + sample_input_kwargs = new_sample.kwargs.copy() + sample_input_kwargs.update(dim=1) + yield SampleInput( + new_sample.input.clone(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked norm.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + sample_input_args, sample_input_kwargs = ( + ord, + ) + sample_input.args, sample_input.kwargs.copy() + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def reference_masked_std_var( + numpy_fn, +): + ref = reference_reduction_numpy(numpy_fn) + + # Translate unbiased or correction arguments into ddof + def func( + input, + dim=None, + unbiased=None, + *, + correction=None, + **kwargs, + ): + ddof = 1 + if unbiased is not None: + ddof = 1 if unbiased else 0 + if correction is not None: + ddof = correction + + if isinstance(dim, Sequence): + dim = tuple(dim) + + return ref(input, dim, ddof=ddof, **kwargs) + + return func + + +def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked std/var.""" + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + from torch.testing._internal.common_methods_invocations import sample_inputs_std_var + + def masked_samples(): + for sample_input in sample_inputs_std_var( + op_info, device, dtype, requires_grad, **kwargs + ): + if len(sample_input.args) and isinstance(sample_input.args[0], bool): + continue # masked.{std, var} doesn't support `.var(unbiased)` + + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + for sample_input in masked_samples(): + correction = sample_input.kwargs.get("correction") + if correction is None: + correction = int(sample_input.kwargs.get("unbiased", True)) + + dim = sample_input.kwargs.get("dim", None) + + if sample_input.kwargs.get("mask") is None: + orig_count = torch.masked.sum( + torch.ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + ) + else: + inmask = torch.masked._input_mask( + sample_input.input, *sample_input.args, **sample_input.kwargs + ) + orig_count = 
torch.masked.sum( + inmask.new_ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + if orig_count.min() <= correction + 1: + # Skip samples that lead to nans in var computation + continue + + yield sample_input + + +def sample_inputs_masked_softmax( + op_info, device, dtype, requires_grad, with_dtype=False, **kwargs +): + """Sample inputs for masked softmax, log_softmax, and softmin. + + Masked normalization operator is a reduction operator with + trailing mask optional argument. A mask is a bool tensor with the + same shape as input or a shape that is broadcastable to input + shape. + """ + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input.args, + mask=mask, + **sample_input.kwargs, + ) + + +def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked cumsum and cumprod.""" + inputs: List[SampleInput] = [] + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + if type(mask) != torch.Tensor: + continue + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + if "keepdim" in sample_input_kwargs: + sample_input_kwargs.pop("keepdim") + # dimension is required + if sample_input_args: + dim = sample_input.args[0] + else: + if "dim" not in sample_input_kwargs: + continue + dim = sample_input_kwargs.pop("dim") + sample_input_args = (dim,) + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input_args, + **sample_input_kwargs, + ) + + +def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked logaddexp.""" + shapes = [(S,), (S, S), (S, M, S)] + input_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + other_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for shape, input_masks, other_masks in zip( + shapes, input_mask_lists, other_mask_lists + ): + for input_mask, other_mask in zip(input_masks, other_masks): + yield SampleInput( + make_arg(shape), + make_arg(shape), + input_mask=input_mask, + other_mask=other_mask, + ) + + +def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked normalize.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, use_zero_dimensions=False, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + ord, + *sample_input.args, + **sample_input.kwargs, + ) + + +op_db: List[OpInfo] = [ + ReductionOpInfo( + "masked.sum", + ref=reference_reduction_numpy(np.sum), + method_variant=None, + identity=0, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, 
torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=5e-2), + torch.float16: tol(atol=1e-03, rtol=5e-3), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=0.1, rtol=0.1), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestMasked", + "test_mask_layout", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + ReductionOpInfo( + "masked.prod", + ref=prod_numpy, + method_variant=None, + identity=1, + nan_policy="propagate", + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + "TestReductions", + "test_ref_small_input", + dtypes=(torch.int8, torch.int16, torch.int32), + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... 
(used for sparse_coo inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + device_type="cuda", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_duplicate_values", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1.5e-03)}), + "TestMasked", + "test_mask_layout", + device_type="cpu", + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)}), + "TestOperators", + "test_jvp", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + OpInfo( + "masked.cumsum", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.cumprod", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCompositeCompliance", + "test_backward", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=2.6e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amax", + nan_policy="propagate", + supports_out=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amax), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amin", + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amin), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmax", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmax + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmin", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmin + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.mean", + ref=reference_reduction_numpy(np.mean) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_sparse_csr=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_duplicate_values", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_small_input", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=0.05), + torch.float16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=2e-03)}), + "TestSparseCompressed", + "test_consistency", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.median", + dtypes=floating_types_and(torch.bfloat16, torch.float16), + method_variant=None, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=partial( + sample_inputs_masked_softmax, use_zero_dimensions=False + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.norm", + identity=0, + method_variant=None, + nan_policy="propagate", + supports_out=False, + promotes_int_to_float=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # torch.jit.frontend.NotSupportedError: Compiled functions + # can't take variable number of arguments or use + # keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_norm, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.var", + ref=reference_masked_std_var(np.var) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", 
"test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=4e-5, rtol=2e-2), + } + ), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + ReductionOpInfo( + "masked.std", + ref=reference_masked_std_var(np.std) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-02, rtol=1e-02), + torch.float16: tol(atol=1e-02, rtol=1e-02), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=5e-03, rtol=5e-04), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + OpInfo( + "masked.softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.log_softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + ], + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.softmin", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: + # Mismatched elements: 2 / 2 (100.0%) + # Greatest absolute difference: nan at index (0,) (up to 0.0001 allowed) + # Greatest relative difference: nan at index (0,) (up to 0.0001 allowed + DecorateInfo( + unittest.skip("Skipped!"), + "TestOperators", + "test_vmapvjpvjp", + device_type="cpu", + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.normalize", + method_variant=None, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_normalize, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-5, rtol=6e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ], + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.logaddexp", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestFwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestBwdGradients", "test_fn_gradgrad" + ), + ), + sample_inputs_func=sample_inputs_masked_logaddexp, + gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation, + ), + ReductionOpInfo( + "masked.logsumexp", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.skip("Skipped!"), "TestReductions", "test_dim_empty_keepdim" + ), + # Identity can't be -torch.inf without overflow + DecorateInfo( + unittest.skip("Skipped!"), + "TestReductions", + "test_empty_tensor_empty_slice", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # all the values are the same except for -inf vs nan + DecorateInfo(unittest.skip("Skipped!"), "TestDecomp", "test_comprehensive"), + # FIXME: + # Mismatched elements: 2 / 12 (16.7%) + # Greatest absolute difference: 9223372034707292160 at index (0, 0, 0, 0) + # Greatest relative difference: 0.0 at index (0, 0, 0, 1) + DecorateInfo( + unittest.skip("Skipped!"), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cpu", + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed395eef0203afc52f7ec9bcc24bd2d7ce5fa18 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py @@ -0,0 +1,810 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import SM53OrLater +from torch.testing._internal.common_device_type import precisionOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, +) +from torch.testing._internal.common_utils import TEST_SCIPY, TEST_WITH_ROCM +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + sample_inputs_spectral_ops, + SampleInput, + SpectralFuncInfo, + SpectralFuncType, +) +from torch.testing._internal.opinfo.refs import ( + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, +) + + +has_scipy_fft = False +if TEST_SCIPY: + try: + import scipy.fft + + has_scipy_fft = True + except ModuleNotFoundError: + pass + + +class SpectralFuncPythonRefInfo(SpectralFuncInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. 
+ """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. if None + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant="", + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant, op_db=op_db + ) + assert isinstance(self.torch_opinfo, SpectralFuncInfo) + + inherited = self.torch_opinfo._original_spectral_func_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +def error_inputs_fft(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Zero-dimensional tensor has no dimension to take FFT of + yield ErrorInput( + SampleInput(make_arg()), + error_type=IndexError, + error_regex="Dimension specified as -1 but tensor has no dimensions", + ) + + +def error_inputs_fftn(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Specifying a dimension on a zero-dimensional tensor + yield ErrorInput( + SampleInput(make_arg(), dim=(0,)), + error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions", + ) + + +def sample_inputs_fft_with_min( + op_info, device, dtype, requires_grad=False, *, min_size, **kwargs +): + yield from sample_inputs_spectral_ops( + op_info, device, dtype, requires_grad, **kwargs + ) + if TEST_WITH_ROCM: + # FIXME: Causes floating point exception on ROCm + return + + # Check the "Invalid number of data points" error isn't too strict + # https://github.com/pytorch/pytorch/pull/109083 + a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(a) + + +def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor( + shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield SampleInput(mt((9, 10))) + yield SampleInput(mt((50,)), kwargs=dict(dim=0)) + yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,))) + yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1))) + yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2))) + + +# Operator database +op_db: List[OpInfo] = [ + SpectralFuncInfo( + "fft.fft", + aten_name="fft_fft", + decomp_aten_name="_fft_c2c", + ref=np.fft.fft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + SpectralFuncInfo( + "fft.fft2", + aten_name="fft_fft2", + ref=np.fft.fft2, + decomp_aten_name="_fft_c2c", + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, 
torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_complex_half_reference_testing", + device_type="cuda", + dtypes=[torch.complex32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + SpectralFuncInfo( + "fft.fftn", + aten_name="fft_fftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.fftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + ), + SpectralFuncInfo( + "fft.hfft", + aten_name="fft_hfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.hfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + check_batched_gradgrad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + ), + ), + SpectralFuncInfo( + "fft.hfft2", + aten_name="fft_hfft2", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfft2 if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + 
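+ # --- Illustrative sketch (hypothetical helper): the kind of check that the
+ # `ref=np.fft.*` fields in these entries enable -- comparing torch.fft against
+ # the NumPy reference in double precision, where the two should agree tightly.
+ def _example_fft_matches_numpy_reference():
+     a = torch.randn(8, dtype=torch.complex128)
+     expected = torch.from_numpy(np.fft.fft(a.numpy()))
+     torch.testing.assert_close(torch.fft.fft(a), expected, rtol=1e-6, atol=1e-6)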
DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + # FIXME: errors are too large; needs investigation + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_complex_half_reference_testing", + device_type="cuda", + ), + ), + ), + SpectralFuncInfo( + "fft.hfftn", + aten_name="fft_hfftn", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + "fft.rfft", + aten_name="fft_rfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + skips=(), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.rfft2", + aten_name="fft_rfft2", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft2, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.rfftn", + aten_name="fft_rfftn", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + 
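+ # --- Illustrative sketch (hypothetical helper): the real-to-complex transforms
+ # above (fft.rfft, fft.rfft2, fft.rfftn) drop the redundant half of the spectrum;
+ # irfft recovers the original real signal when told the output length explicitly.
+ def _example_rfft_roundtrip():
+     x = torch.randn(10, dtype=torch.float64)
+     X = torch.fft.rfft(x)                      # shape (10 // 2 + 1,) == (6,), complex
+     x_back = torch.fft.irfft(X, n=x.numel())   # pass n: 6 bins could come from length 10 or 11
+     torch.testing.assert_close(x_back, x)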
decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.ifft", + aten_name="fft_ifft", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + ), + SpectralFuncInfo( + "fft.ifft2", + aten_name="fft_ifft2", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ifftn", + aten_name="fft_ifftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ihfft", + aten_name="fft_ihfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.ihfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fft, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + skips=(), + check_batched_grad=False, + ), + SpectralFuncInfo( + "fft.ihfft2", + aten_name="fft_ihfft2", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # 
https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=( + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + # Mismatched elements! + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warnings"), + ), + ), + SpectralFuncInfo( + "fft.ihfftn", + aten_name="fft_ihfftn", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archss + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + # Mismatched elements! 
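+ # --- Illustrative sketch: the `*(() if (not SM53OrLater) else (torch.half,))`
+ # idiom used throughout dtypesIfCUDA simply splats either an empty tuple or an
+ # extra dtype tuple into the dtype helper. `supports_half` below is a stand-in
+ # flag for illustration, not part of this file.
+ def _example_dtype_gating(supports_half=False):
+     gated = all_types_and(torch.bool, *(() if not supports_half else (torch.half,)))
+     assert (torch.half in gated) == supports_half
+     return gated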
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + ], + ), + SpectralFuncInfo( + "fft.irfft", + aten_name="fft_irfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.irfft2", + aten_name="fft_irfft2", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.irfftn", + aten_name="fft_irfftn", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + OpInfo( + "fft.fftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "fft.ifftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), +] + +python_ref_db: List[OpInfo] = [ + SpectralFuncPythonRefInfo( + "_refs.fft.fft", + torch_opinfo_name="fft.fft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft", + torch_opinfo_name="fft.ifft", + ), + SpectralFuncPythonRefInfo( + 
"_refs.fft.rfft", + torch_opinfo_name="fft.rfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft", + torch_opinfo_name="fft.irfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft", + torch_opinfo_name="fft.hfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft", + torch_opinfo_name="fft.ihfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fftn", + torch_opinfo_name="fft.fftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifftn", + torch_opinfo_name="fft.ifftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfftn", + torch_opinfo_name="fft.rfftn", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfftn", + torch_opinfo_name="fft.irfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfftn", + torch_opinfo_name="fft.hfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfftn", + torch_opinfo_name="fft.ihfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + # AssertionError: Reference result was farther (0.09746177145360499) from the precise + # computation than the torch result was (0.09111555632069855) + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_torch_fallback", + dtypes=(torch.float16,), + device_type="cuda", + ), + # AssertionError: Reference result was farther (0.0953431016138116) from the precise + # computation than the torch result was (0.09305490684430734) + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_executor", + dtypes=(torch.float16,), + device_type="cuda", + ), + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fft2", + torch_opinfo_name="fft.fft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft2", + torch_opinfo_name="fft.ifft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft2", + torch_opinfo_name="fft.rfft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft2", + torch_opinfo_name="fft.irfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft2", + torch_opinfo_name="fft.hfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft2", + torch_opinfo_name="fft.ihfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + # FIXME: + # Reference result was farther (0.0953431016138116) from the precise computation + # than the torch result was (0.09305490684430734)! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_executor", + device_type="cuda", + ), + ], + ), + PythonRefInfo( + "_refs.fft.fftshift", + op_db=op_db, + torch_opinfo_name="fft.fftshift", + ), + PythonRefInfo( + "_refs.fft.ifftshift", + op_db=op_db, + torch_opinfo_name="fft.ifftshift", + ), +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..e94c6a67114431c224131124d89ab8868ca13e66 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py @@ -0,0 +1,2481 @@ +# mypy: ignore-errors + +import itertools +import random +import unittest +from functools import partial +from itertools import chain, product +from typing import Iterable, List, Tuple + +import numpy as np +from numpy import inf + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import ( + _get_magma_version, + _get_torch_cuda_version, + with_tf32_off, +) +from torch.testing._internal.common_device_type import ( + has_cusolver, + skipCPUIfNoLapack, + skipCUDAIf, + skipCUDAIfNoCusolver, + skipCUDAIfNoMagma, + skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoMagmaAndNoLinalgsolver, + skipCUDAIfRocm, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + all_types_and_complex, + all_types_and_complex_and, + floating_and_complex_types, + floating_and_complex_types_and, + get_all_complex_dtypes, +) +from torch.testing._internal.common_utils import ( + GRADCHECK_NONDET_TOL, + IS_MACOS, + make_fullrank_matrices_with_distinct_singular_values, + skipIfSlowGradcheckEnv, + slowTest, + TEST_WITH_ROCM, +) +from torch.testing._internal.opinfo.core import ( + clone_sample, + DecorateInfo, + ErrorInput, + gradcheck_wrapper_hermitian_input, + L, + M, + OpInfo, + ReductionOpInfo, + S, + SampleInput, +) +from torch.testing._internal.opinfo.refs import PythonRefInfo, ReductionPythonRefInfo + + +def sample_kwargs_vector_norm(t, **kwargs): + # orders with / without identity + def ords(): + has_id = (6, 4, 2, 1, 0, 0.9) + no_id = (inf, -2.1, -inf) + if t.numel() == 0: + dim = kwargs.get("dim") + if dim is None: + return has_id + if not isinstance(dim, Iterable): + dim = (dim,) + for d in dim: + if t.size(d) == 0: + return has_id + return has_id + no_id + + return (((), dict(ord=o)) for o in ords()) + + +def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + is_linalg_svd = "linalg.svd" in op_info.name + batches = [(), (0,), (3,)] + ns = [0, 3, 5] + + def uniformize(usv): + S = usv[1] + k = S.shape[-1] + U = usv[0][..., :k] + Vh = usv[2] if is_linalg_svd else usv[2].mH + Vh = Vh[..., :k, :] + return U, S, Vh + + def fn_U(usv): + U, _, _ = uniformize(usv) + return U.abs() + + def fn_S(usv): + return uniformize(usv)[1] + + def fn_Vh(usv): + # We also return S to test + _, S, Vh = uniformize(usv) + return S, Vh.abs() + + def fn_UVh(usv): + U, S, Vh = uniformize(usv) + return U @ Vh, S + + fns = (fn_U, fn_S, fn_Vh, fn_UVh) + + fullmat = "full_matrices" if is_linalg_svd else "some" + + for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns): + shape = batch + (n, k) 
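+ # --- Illustrative sketch (hypothetical helper): with full_matrices=False,
+ # torch.linalg.svd returns the reduced factors U (m, k), S (k,), Vh (k, n)
+ # with k = min(m, n), and U @ diag(S) @ Vh reconstructs the input -- the
+ # reduced form that uniformize() above normalizes both svd variants to.
+ def _example_reduced_svd():
+     A = torch.randn(5, 3, dtype=torch.float64)
+     U, S, Vh = torch.linalg.svd(A, full_matrices=False)
+     assert U.shape == (5, 3) and S.shape == (3,) and Vh.shape == (3, 3)
+     torch.testing.assert_close(U @ torch.diag(S) @ Vh, A)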
+ yield SampleInput( + make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn + ) + + +def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + yield SampleInput(make_arg((S, 3)), args=(make_arg((S, 3)),)) + yield SampleInput( + make_arg((S, 3, S)), args=(make_arg((S, 3, S)),), kwargs=dict(dim=1) + ) + yield SampleInput(make_arg((1, 3)), args=(make_arg((S, 3)),), kwargs=dict(dim=-1)) + + +def error_inputs_cross(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + sample = SampleInput(input=make_arg((S, 3)), args=(make_arg((S, 1)),)) + err = "inputs dimension -1 must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((5, S, 3)), args=(make_arg((S, 3)),)) + err = "inputs must have the same number of dimensions" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((S, 2)), args=(make_arg((S, 2)),)) + err = "must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput( + input=make_arg((S, 2)), args=(make_arg((S, 2)),), kwargs=dict(dim=2) + ) + err = "Dimension out of range" + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + + +def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs): + """ + This function generates input for torch.linalg.householder_product (torch.orgqr). + The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors. + Empty, square, rectangular, batched square and batched rectangular input is generated. + """ + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + low=-2, + high=2, + ) + # Each column of the matrix is getting multiplied many times leading to very large values for + # the Jacobian matrix entries and making the finite-difference result of grad check less accurate. + # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here. 
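+ # --- Illustrative sketch (hypothetical helper): householder_product (orgqr),
+ # exercised by the samples above, assembles an orthogonal/unitary Q from
+ # Householder reflectors and tau coefficients such as those returned by
+ # torch.geqrf; the resulting Q has orthonormal columns.
+ def _example_householder_product():
+     A = torch.randn(5, 3, dtype=torch.float64)
+     reflectors, tau = torch.geqrf(A)
+     Q = torch.linalg.householder_product(reflectors, tau)   # shape (5, 3)
+     torch.testing.assert_close(Q.mT @ Q, torch.eye(3, dtype=torch.float64))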
+ yield SampleInput(make_arg((S, S)), make_arg((S,))) + yield SampleInput(make_arg((S + 1, S)), make_arg((S,))) + yield SampleInput(make_arg((2, 1, S, S)), make_arg((2, 1, S))) + yield SampleInput(make_arg((2, 1, S + 1, S)), make_arg((2, 1, S))) + yield SampleInput( + make_arg((0, 0), low=None, high=None), + make_arg((0,), low=None, high=None), + ) + yield SampleInput(make_arg((S, S)), make_arg((0,), low=None, high=None)) + # m = n = S, k = S - 2 + yield SampleInput(make_arg((S, S)), make_arg((S - 2,), low=None, high=None)) + # m = S, n = S -1, k = S - 2 + yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None)) + + +def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype) + + def make_singular_matrix_batch_base(size, rank): + assert size[-1] == size[-2] + assert rank > 0 and rank < size[-1] + + n = size[-1] + a = make_arg(size[:-2] + (n, rank)) / 10 + b = make_arg(size[:-2] + (rank, n)) / 10 + x = a @ b + lu, pivs, _ = torch.linalg.lu_factor_ex(x) + p, l, u = torch.lu_unpack(lu, pivs) + u_diag_abs = u.diagonal(0, -2, -1).abs() + u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values + u_diag_abs_smallest_idxs = torch.topk( + u_diag_abs, k=(n - rank), largest=False + ).indices + u.diagonal(0, -2, -1).div_(u_diag_abs_largest) + u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps + matrix = p @ l @ u + + matrix.requires_grad_(requires_grad) + return matrix + + for batch, size in product(((), (2,), (2, 2)), range(6)): + shape = batch + (size, size) + for rank in range(1, size): + yield SampleInput(make_singular_matrix_batch_base(shape, rank)) + + +def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_arg_fullrank = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + # (, ()) + test_sizes = [ + (1, ()), + (2, (0,)), + (2, (2,)), + ] + + for matrix_size, batch_sizes in test_sizes: + size = batch_sizes + (matrix_size, matrix_size) + for n in (0, 3, 5): + yield SampleInput(make_arg(size), args=(n,)) + for n in [-4, -2, -1]: + yield SampleInput(make_arg_fullrank(*size), args=(n,)) + + +def sample_inputs_linalg_det_logdet_slogdet( + op_info, device, dtype, requires_grad, **kwargs +): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + batches = [(), (0,), (3,)] + ns = [0, 1, 5] + + is_logdet = op_info.name == "logdet" + + for ( + batch, + n, + ) in product(batches, ns): + shape = batch + (n, n) + A = make_arg(*shape) + # Need to make the matrices in A have positive determinant for autograd + # To do so, we multiply A by its determinant to flip the sign of its determinant + if is_logdet and not A.is_complex() and A.numel() > 0: + s = torch.linalg.slogdet(A).sign + A = A * s.unsqueeze(-1).unsqueeze(-1) + A.requires_grad_(requires_grad) + yield SampleInput(A) + + +def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """Samples the inputs for both linalg.lu_solve and lu_solve""" + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_a = partial(make_fn, dtype=dtype, device=device) + make_b = partial(make_tensor, dtype=dtype, device=device) + + 
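+ # --- Illustrative sketch (hypothetical helper): the samples generated below pair
+ # torch.linalg.lu_factor output with right-hand sides, as in this standalone
+ # solve of A X = B checked against torch.linalg.solve.
+ def _example_lu_solve():
+     A = torch.randn(4, 4, dtype=torch.float64) + 4 * torch.eye(4, dtype=torch.float64)
+     B = torch.randn(4, 2, dtype=torch.float64)
+     LU, pivots = torch.linalg.lu_factor(A)
+     X = torch.linalg.lu_solve(LU, pivots, B)
+     torch.testing.assert_close(X, torch.linalg.solve(A, B))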
def clone(X, requires_grad): + Y = X.clone() + Y.requires_grad_(requires_grad) + return Y + + is_linalg_lu_solve = op_info.name == "linalg.lu_solve" + + batches = ((), (0,), (2,)) + ns = (3, 1, 0) + nrhs = (4, 1, 0) + + for n, batch, rhs in product(ns, batches, nrhs): + A = make_a(*(batch + (n, n))) + LU, pivots = torch.linalg.lu_factor(A) + + B = make_b(batch + (n, rhs)) + + grads = (False,) if not requires_grad else (True, False) + # we try all possible combinations of requires_grad for each input + for LU_grad, B_grad in product(grads, grads): + # when requires_grad == True, at least one input has to have requires_grad enabled + if requires_grad and not LU_grad and not B_grad: + continue + + if is_linalg_lu_solve: + for adjoint, left in product((True, False), repeat=2): + yield SampleInput( + clone(LU, LU_grad), + args=(pivots, clone(B if left else B.mT, B_grad)), + kwargs=dict(adjoint=adjoint, left=left), + ) + else: + yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots)) + + +def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): + # Each test case consists of the sizes in the chain of multiplications + # e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) + test_cases = [ + [1, 2, 1], + [2, 0, 2], + [0, 2, 2], + [2, 2, 2, 2], + [2, 3, 4, 5], + [5, 4, 0, 2], + [2, 4, 3, 5, 3, 2], + ] + + for sizes in test_cases: + tensors = [] + for size in zip(sizes[:-1], sizes[1:]): + t = make_tensor( + size, dtype=dtype, device=device, requires_grad=requires_grad + ) + tensors.append(t) + yield SampleInput(tensors) + + +def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): + low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + sizes = ((2, 2), (2, 3, 2)) + if dtype in low_precision_dtypes: + # svdvals not supported for low precision dtypes + ords = ("fro", inf, -inf, 1, -1) + else: + ords = ("fro", "nuc", inf, -inf, 1, -1, 2, -2) + dims = ((-2, -1), (-1, 0)) + + for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): + yield SampleInput(make_arg(size), args=(ord, dim, keepdim)) + + +def sample_inputs_linalg_norm( + op_info, device, dtype, requires_grad, *, variant=None, **kwargs +): + if variant is not None and variant not in ("subgradient_at_zero",): + raise ValueError( + f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}" + ) + + test_sizes = [ + (S,), + (0,), + (S, S), + (0, 0), + (S, 0), + (0, S), + (S, S, S), + (0, S, S), + (S, 0, S), + (0, 0, 0), + ] + + vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) + if dtype in {torch.float16, torch.bfloat16, torch.complex32}: + # svdvals not supported for low precision dtypes + matrix_ords = ("fro", inf, -inf, 1, -1) + else: + matrix_ords = (None, "fro", "nuc", inf, -inf, 1, -1, 2, -2) + + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + low=None, + high=None, + ) + + for test_size in test_sizes: + is_vector_norm = len(test_size) == 1 + is_matrix_norm = len(test_size) == 2 + + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. 
+ is_valid_for_p2 = is_vector_norm or (test_size[-1] != 0 and test_size[-2] != 0) + + for keepdim in [False, True]: + if variant != "subgradient_at_zero" and is_valid_for_p2: + yield SampleInput(make_arg(test_size), keepdim=keepdim) + + if not (is_vector_norm or is_matrix_norm): + continue + + ords = vector_ords if is_vector_norm else matrix_ords + + for ord in ords: + if is_vector_norm and test_size[-1] == 0: + if ord == np.inf or (ord is not None and ord < 0): + # RuntimeError: linalg.vector_norm cannot compute the + # {ord} norm on an empty tensor because the operation + # does not have an identity + continue + elif is_matrix_norm: + dims_to_check = { + None: (0,), + np.inf: (0,), + 2: (0, 1), + 1: (1,), + -1: (1,), + -2: (0, 1), + -np.inf: (0,), + }.get(ord, ()) + + if any(test_size[d] == 0 for d in dims_to_check): + # IndexError: amax(): Expected reduction dim {dim} to + # have non-zero size. + continue + + if variant == "subgradient_at_zero": + yield SampleInput( + torch.zeros( + test_size, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ), + ord, + keepdim=keepdim, + ) + else: + yield SampleInput(make_arg(test_size), ord, keepdim=keepdim) + + if ord in ["nuc", "fro"]: + yield SampleInput( + make_arg(test_size), ord=ord, keepdim=keepdim, dim=(0, 1) + ) + + +def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + batches = ((), (0,), (1,), (5,)) + ns = (0, 1, 3, 5) + for b, n in product(batches, ns): + shape = b + (n,) + yield SampleInput(make_arg(shape), args=(make_arg(shape),)) + for i in range(len(shape)): + yield SampleInput( + make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i) + ) + + +def sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates invertible inputs for linear algebra ops + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 
+ """ + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + + for batch, n in product(batches, ns): + yield SampleInput(make_arg(*batch, n, n)) + + +def sample_inputs_matrix_rank(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function produces inputs for matrix rank that test + all possible combinations for atol and rtol + """ + + def make_tol_arg(kwarg_type, inp): + if kwarg_type == "none": + return None + if kwarg_type == "float": + return 1.0 + assert kwarg_type == "tensor" + return torch.ones(inp.shape[:-2], device=device) + + for tol_type in ["float", "tensor"]: + for atol_type, rtol_type in product(["none", tol_type], repeat=2): + if ( + not atol_type and not rtol_type + ): # default behavior, so skipped here so it's not tested 2 extra times + continue + for sample in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad + ): + assert sample.kwargs == {} + sample.kwargs = { + "atol": make_tol_arg(atol_type, sample.input), + "rtol": make_tol_arg(rtol_type, sample.input), + } + yield sample + + # default kwargs + yield from sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + + +def sample_inputs_linalg_pinv_singular( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to + test the backward method of `linalg_pinv`. That way we always preserve the rank of the + input no matter the perturbations applied to it by the gradcheck. + Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood. + """ + batches = [(), (0,), (2,), (1, 1)] + # the size of at least 30 is required to cause failures for the previous implicit implementation + # of the pinv's backward method, albeit it is slow. 
+ size = [0, 3, 50] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + # Note that by making the columns of `a` and `b` orthonormal we make sure that + # the product matrix `a @ b.t()` has condition number 1 when restricted to its image + a = ( + torch.rand(*batch, m, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + b = ( + torch.rand(*batch, n, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + yield SampleInput(a, args=(b,)) + + +def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # autograd is not supported for inputs with zero number of elements + shapes = ( + (S, S), + (2, S, S), + (2, 1, S, S), + ) + + for shape in shapes: + yield SampleInput(make_arg(shape)) + + +def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + shapes = ( + (), + (1,), + (S,), + (2, S), + ) + + for shape in shapes: + if len(shape) > 0 and shape[-1] > 1: + yield SampleInput(make_arg(shape)) + n = shape[-1] if len(shape) > 0 else 1 + for i in range(3): + # n-1, n, n+1 + N = n + i - 1 + if N < 2: + continue + yield SampleInput(make_arg(shape), kwargs=dict(N=N)) + + +def np_vander_batched(x, N=None): + # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests) + if x.ndim == 0: + x = x[np.newaxis] + if x.ndim == 1: + y = np.vander(x, N=N, increasing=True) + return y + else: + if N is None: + N = x.shape[-1] + y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N)) + return y + + +def sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + # Cholesky factorization is for positive-definite matrices + single_well_conditioned_matrix = random_well_conditioned_matrix( + S, S, dtype=dtype, device=device + ) + batch_well_conditioned_matrices = random_well_conditioned_matrix( + 2, S, S, dtype=dtype, device=device + ) + single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH + batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH + + inputs = ( + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + single_pd, + batch_pd, + ) + test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs) + for l in test_cases: + # generated lower-triangular samples + l.requires_grad = requires_grad + yield SampleInput(l) # upper=False by default + yield SampleInput( + l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False) + ) + + # generate upper-triangular inputs + u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad) + yield SampleInput(u, kwargs=dict(upper=True)) + + +def sample_inputs_linalg_ldl_factor( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + + # Symmetric inputs + yield SampleInput( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=False), + ) # single matrix + yield SampleInput( + random_symmetric_pd_matrix(S, 2, dtype=dtype, 
device=device), + kwargs=dict(hermitian=False), + ) # batch of matrices + yield SampleInput( + torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # 0x0 matrix + yield SampleInput( + torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # zero batch of matrices + + # Hermitian inputs + # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+ + magma_254_available = device.type == "cuda" and _get_magma_version() >= (2, 5, 4) + if dtype.is_complex and (device.type == "cpu" or magma_254_available): + yield SampleInput( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # single matrix + yield SampleInput( + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # batch of matrices + + +def sample_inputs_linalg_ldl_solve( + op_info, device, dtype, requires_grad=False, **kwargs +): + # Generate LDL factors of symmetric (and Hermitian on CPU) matrices + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + symmetric_inputs = ( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix + random_symmetric_pd_matrix( + S, 2, dtype=dtype, device=device + ), # batch of matrices + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + ) + hermitian_inputs = ( + ( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + ) + if device.type == "cpu" and dtype.is_complex + else () + ) + test_cases1 = ( + torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs + ) + test_cases2 = ( + torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs + ) + + # Symmetric case + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for test_case in test_cases1: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=False) + ) + + # Hermitian case + for test_case in test_cases2: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=True) + ) + + +def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + device = torch.device(device) + + drivers: Tuple[str, ...] + if device.type == "cuda": + drivers = ("gels",) + else: + drivers = ("gels", "gelsy", "gelss", "gelsd") + + # we generate matrices of shape (..., n + delta, n) + deltas: Tuple[int, ...] 
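+    # Purely illustrative sketch (comment only, not an additional sample): the op under
+    # test solves a least-squares problem min ||A @ X - B||, roughly
+    #   A = torch.randn(4, 3)
+    #   B = torch.randn(4, 2)
+    #   X = torch.linalg.lstsq(A, B).solution  # X has shape (3, 2)
+    # The shapes and the default driver are arbitrary and used only for illustration.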
+ if device.type == "cpu" or has_cusolver(): + deltas = (-1, 0, +1) + # only square systems if Cusolver is not available + # becase we solve a lstsq problem with a transposed matrix in the backward + else: + deltas = (0,) + + for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas): + shape = batch + (3 + delta, 3) + a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device) + a.requires_grad_(requires_grad) + b = make_tensor( + shape, + dtype=dtype, + device=device, + low=None, + high=None, + requires_grad=requires_grad, + ) + yield SampleInput(a, b, driver=driver) + + +def error_inputs_lstsq(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d,)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d, None)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # Shapes for 2D Tensors + shapes_2d = ((S, S), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((S, S, S),) + + kwargs_2d = ({}, dict(offset=2), dict(offset=2), dict(offset=1)) + kwargs_3d = ( + dict(offset=1, dim1=1, dim2=2), + dict(offset=2, dim1=0, dim2=1), + dict(offset=-2, dim1=0, dim2=1), + ) + + for shape, kwarg in chain( + product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d) + ): + yield SampleInput(make_arg(shape), kwargs=kwarg) + + +def error_inputs_diagonal_diag_embed(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + shapes1d = (0, 1, (0,), (1,)) + shapes2d = ((M, L),) + shapes3d = ((M, S, L),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 == dim2 is not allowed + dict(dim1=1, dim2=1), + # out of bounds dims are not allowed + dict(dim1=10000), + dict(dim2=10000), + ) + + kwargs3d = kwargs2d + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + arg = make_arg(shape) + sample = SampleInput(input=arg, kwargs=kwargs) + + dim1 = kwargs.get("dim1") + dim2 = kwargs.get("dim2") + + if "diagonal" in op_info.name: + num_dim = arg.dim() + elif op_info.name in ("diag_embed", "_refs.diag_embed"): + # these are valid inputs for diag_embed + if shape in ((0,), (1,)): + continue + num_dim = arg.dim() + 1 + else: + raise RuntimeError("should be unreachable") + + bound1 = -num_dim + bound2 = num_dim - 1 + dim_range = range(bound1, bound2 + 1) + dim1_cond = dim1 and dim1 not in dim_range + dim2_cond = dim2 and dim2 not in dim_range + + if dim1 == dim2: + err = f"diagonal dimensions cannot be identical {dim1}, {dim2}" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + elif dim1_cond or dim2_cond: + err_dim = dim1 if dim1_cond else dim2 + err = ( + r"Dimension out of range \(expected to be in range of " + rf"\[{bound1}, {bound2}\], but got {err_dim}\)" + ) + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + else: + raise RuntimeError("should be unreachable") + + +def sample_inputs_linalg_cholesky( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates always positive-definite 
input for torch.linalg.cholesky using + random_hermitian_pd_matrix. + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + """ + from torch.testing._internal.common_utils import random_hermitian_pd_matrix + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + for batch, n, upper in product(batches, ns, [True, False]): + a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device) + a.requires_grad = requires_grad + yield SampleInput(a, upper=upper) + + +def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eig + """ + + def out_fn(output): + return output[0], abs(output[1]) + + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument. + """ + + def out_fn(output): + if isinstance(output, tuple): + # eigh function + return output[0], abs(output[1]) + else: + # eigvalsh function + return output + + # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + # Note: we cannot use np.random.choice here as TorchDynamo + # does not support tensors of strings. + sample.kwargs = {"UPLO": random.choice(["L", "U"])} + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.pinv with hermitian=False keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + real_dtype = o.input.real.dtype if dtype.is_complex else dtype + # requires_grad path for rtol tensor is not implemented + for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)): + o = clone_sample(o) + o.kwargs = {"rtol": rtol} + yield o + + +def sample_inputs_linalg_pinv_hermitian( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates input for torch.linalg.pinv with hermitian=True keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + o.kwargs = {"hermitian": True} + yield o + + +def sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs +): + """ + This function generates always solvable input for torch.linalg.solve + We sample a fullrank square matrix (i.e. invertible) A + The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'. + The second input is generated as the product of 'batches', 'ns' and 'nrhs'. + In total this function generates 18 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices. + 'ns' gives 0x0 and 5x5 matrices. 
+ and 'nrhs' controls the number of vectors to solve for: + () - using 1 as the number of vectors implicitly + (1,) - same as () but explicit + (3,) - solve for 3 vectors. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs. + torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow + 1D tensors (vectors) as the right-hand-side. + Once torch.solve / triangular_solve / cholesky_solve and its testing are removed, + 'vector_rhs_allowed' may be removed here as well. + """ + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_a = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_b = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,)] + ns = [5, 0] + if vector_rhs_allowed: + nrhs = [(), (1,), (3,)] + else: + nrhs = [(1,), (3,)] + + for n, batch, rhs in product(ns, batches, nrhs): + yield SampleInput(make_a(*batch, n, n), args=(make_b(batch + (n,) + rhs),)) + + +def sample_inputs_linalg_solve_triangular( + op_info, device, dtype, requires_grad=False, **kwargs +): + make_arg = partial(make_tensor, dtype=dtype, device=device) + bs = (1, 2, 0) + ns = (3, 0) + ks = (1, 3, 0) + + for b, n, k, (left, upper, uni) in product( + bs, ns, ks, product((True, False), repeat=3) + ): + if b == 1: + A = make_arg((n, n)) if left else make_arg((k, k)) + B = make_arg((n, k)) + else: + A = make_arg((b, n, n)) if left else make_arg((b, k, k)) + B = make_arg((b, n, k)) + if uni: + # Not really necessary, but writing it for consistency + A.diagonal(0, -2, -1).fill_(1.0) + else: + d = A.diagonal(0, -2, -1) + d[d.abs() < 1e-6] = 1.0 + if upper: + A.triu_() + else: + A.tril_() + kwargs = {"upper": upper, "left": left, "unitriangular": uni} + if requires_grad: + for grad_A, grad_B in product((True, False), repeat=2): + # Either A or B needs to have a gradient + if not grad_A and not grad_B: + continue + yield SampleInput( + A.clone().requires_grad_(grad_A), + args=(B.clone().requires_grad_(grad_B),), + kwargs=kwargs, + ) + else: + yield SampleInput(A, args=(B,), kwargs=kwargs) + + +def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates always solvable input for legacy solve functions + (the ones that are not in torch.linalg module). + The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation + should have b.ndim >= 2, vectors are not allowed. + Also the arguments order is swapped. 
+ """ + out = sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False + ) + + def out_fn(output): + return output[0] + + # Reverses tensor order + for sample in out: + sample.input, sample.args = sample.args[0], (sample.input,) + if op_info.name == "solve": + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs): + full_rank = op_info.name == "linalg.lu_factor" + make_fn = ( + make_tensor + if not full_rank + else make_fullrank_matrices_with_distinct_singular_values + ) + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + def out_fn(output): + if op_info.name == "linalg.lu": + return output[1], output[2] + else: + return output + + batch_shapes = ((), (3,), (3, 3)) + # pivot=False only supported in CUDA + pivots = (True, False) if torch.device(device).type == "cuda" else (True,) + deltas = (-2, -1, 0, +1, +2) + for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas): + shape = batch_shape + (S + delta, S) + # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple! + A = make_arg(shape) if not full_rank else make_arg(*shape) + yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn) + + +def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, m, n in product(batches, ns, ns): + yield SampleInput(make_arg(batch + (m, n))) + + +def sample_inputs_linalg_qr_geqrf( + op_info, device, dtype, requires_grad=False, **kwargs +): + # QR is just well defined when the matrix is full rank + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, (m, n) in product(batches, product(ns, ns)): + shape = batch + (m, n) + yield SampleInput(make_arg(*shape)) + + +def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): + a_shapes = [(2, 3, 6), (3, 4, 4, 3)] + # Zero-dim tensors are not supported in NumPy, so we skip them for now. + # NumPy is used in reference check tests. + # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
+ # a_shapes += [(0, 0, 1, 2, 3, 0)] + dimss = [None, (0, 2)] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for a_shape, dims in itertools.product(a_shapes, dimss): + a = make_arg(a_shape) + b = make_arg(a_shape[:2]) + yield SampleInput(a, b, dims=dims) + + +def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = make_fullrank_matrices_with_distinct_singular_values + + def make_input(): + return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) + + # lhs / rhs shape can have any number of dimensions as long as their product equals 12 + shapes = [ + ((2, 2, 3), (12, 1)), + ((4, 3), (6, 1, 2)), + ] + + for shape_lhs, shape_rhs in shapes: + inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() + inp.requires_grad_(requires_grad) + yield SampleInput(inp, ind=len(shape_lhs)) + + +op_db: List[OpInfo] = [ + OpInfo( + "linalg.cross", + ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim), + op=torch.linalg.cross, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + aten_name="linalg_cross", + sample_inputs_func=sample_inputs_cross, + error_inputs_func=error_inputs_cross, + supports_out=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + check_batched_gradgrad=False, + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + variant_test_name="singular", + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_det_singular, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("The backward may give different results"), + "TestCommon", + "test_noncontiguous_samples", + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestBwdGradients", + "test_fn_grad", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + # Both Hessians are incorrect on complex inputs?? 
+ DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93044" + ), + "TestBwdGradients", + "test_fn_grad", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93045" + ), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.diagonal", + aten_name="linalg_diagonal", + aten_backward_name="diagonal_backward", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.float16, torch.chalf + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed, + ), + OpInfo( + "linalg.cholesky", + aten_name="linalg_cholesky", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.cholesky_ex", + aten_name="linalg_cholesky_ex", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vecdot", + aten_name="linalg_vecdot", + ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim), + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_linalg_vecdot, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1.2e-2, rtol=1.7e-2)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.cond", + aten_name="linalg_cond", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cond, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, 
with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.eig", + aten_name="linalg_eig", + op=torch.linalg.eig, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eig, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_out", device_type="cpu" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + ), + OpInfo( + "linalg.eigvals", + aten_name="linalg_eigvals", + op=torch.linalg.eigvals, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigh", + aten_name="linalg_eigh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigvalsh", + aten_name="linalg_eigvalsh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo( + 
unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.householder_product", + aten_name="linalg_householder_product", + op=torch.linalg.householder_product, + aliases=("orgqr",), + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + # TODO: backward uses in-place operations that vmap doesn't like + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_householder_product, + decorators=[ + skipCUDAIfNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}) + ), + DecorateInfo( + unittest.skip("Skipped! Flaky"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cpu", + dtypes=(torch.complex128,), + ), + ], + ), + OpInfo( + "linalg.ldl_factor", + aten_name="linalg_ldl_factor", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_factor_ex", + aten_name="linalg_ldl_factor_ex", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_solve", + aten_name="linalg_ldl_solve", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_solve, + decorators=[ + skipCUDAIf( + _get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1" + ), + skipCUDAIfNoCusolver, + skipCUDAIfRocm, + skipCPUIfNoLapack, + ], + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + dtypes=floating_and_complex_types(), + supports_out=True, + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # we skip gradient checks for this suite as they are tested in + # variant_test_name='grad_oriented' + DecorateInfo(unittest.skip("Skipped!"), "TestFwdGradients"), + DecorateInfo(unittest.skip("Skipped!"), "TestBwdGradients"), + # The values for attribute 'shape' do not match + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + variant_test_name="grad_oriented", + # gradchecks for forward AD fails with multi-Tensor outputs + op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], + supports_out=False, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_lstsq, + 
error_inputs_func=error_inputs_lstsq_grad_oriented, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # tests do not work with passing lambda for op + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + ), + ), + OpInfo( + "linalg.matrix_power", + aliases=("matrix_power",), + aten_name="linalg_matrix_power", + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_power, + ), + OpInfo( + "linalg.multi_dot", + # Need this lambda because gradcheck does not work with TensorList inputs + aten_name="linalg_multi_dot", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_inplace_autograd=False, + # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_multi_dot, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo( + unittest.skip("67470!"), "TestCommon", "test_noncontiguous_samples" + ), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestOpInfo", + device_type="xla", + dtypes=(torch.long,), + ), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + device_type="cpu", + dtypes=(torch.long,), + ), + ), + ), + # NB: linalg.norm has two variants so that different skips can be used for different sample inputs + OpInfo( + "linalg.norm", + aten_name="linalg_norm", + op=torch.linalg.norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_norm, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.norm", + op=torch.linalg.norm, + variant_test_name="subgradients_at_zero", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=partial( + sample_inputs_linalg_norm, variant="subgradient_at_zero" + ), + aten_name="linalg_norm", + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # [NEW] Skips specifically for sample inputs at zero + # norm's vjp/jvp are not well-conditioned near zero + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_fn_fwgrad_bwgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_forward_mode_AD" + ), + DecorateInfo(unittest.expectedFailure, "TestBwdGradients", "test_fn_grad"), + ), + ), + OpInfo( + "linalg.matrix_norm", + aten_name="linalg_matrix_norm", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_norm, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.qr", + aten_name="linalg_qr", + op=torch.linalg.qr, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.slogdet", + aten_name="linalg_slogdet", + op=torch.linalg.slogdet, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vander", + aten_name="linalg_vander", + ref=np_vander_batched, + op=torch.linalg.vander, + dtypes=all_types_and_complex(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_vander, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + ReductionOpInfo( + "linalg.vector_norm", + op=torch.linalg.vector_norm, + identity=0, + nan_policy="propagate", + supports_multiple_dims=True, + complex_to_real=True, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + generate_args_kwargs=sample_kwargs_vector_norm, + aten_name="linalg_vector_norm", + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + OpInfo( + "linalg.lu_factor", + aten_name="linalg_lu_factor", + op=torch.linalg.lu_factor, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_factor_ex", + aten_name="linalg_lu_factor_ex", + op=torch.linalg.lu_factor_ex, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu", + aten_name="linalg_lu", + op=torch.linalg.lu, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_solve", + op=torch.linalg.lu_solve, + aten_name="linalg_lu_solve", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo( + unittest.skip("Tests different backward paths"), + "TestCommon", + "test_floating_inputs_are_differentiable", + ), + ), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + ), + OpInfo( + "linalg.inv", + aten_name="linalg_inv", + op=torch.linalg.inv, + aliases=("inverse",), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + 
"test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.inv_ex", + aten_name="linalg_inv_ex", + op=torch.linalg.inv_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve", + aten_name="linalg_solve", + op=torch.linalg.solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_ex", + aten_name="linalg_solve_ex", + op=torch.linalg.solve_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_triangular", + aten_name="linalg_solve_triangular", + op=torch.linalg.solve_triangular, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve_triangular, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result); + supports_forward_ad=True, + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_matrix_rank, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + 
dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + # jit doesn't accept tensor inputs for matrix rank + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=[torch.complex64, torch.float32], + ), + ), + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + op=torch.linalg.pinv, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # errors with "leaked XXXX bytes CUDA memory on device 0" + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="singular", + # pinv is Frechet-differentiable in a rank-preserving neighborhood, + # so we feed inputs that are the products of two full-rank factors, + # to avoid any rank changes caused by the perturbations in the gradcheck + op=lambda a, b: torch.linalg.pinv(a @ b.mT), + dtypes=floating_and_complex_types(), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv_singular, + # Only large tensors show issues with implicit backward used prior to + # explicit backward implementation. + decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # CUDA runs out of memory + DecorateInfo( + unittest.skip("Skipped!"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + # This test takes almost 2 hours to run! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + # This test is flaky under slow gradcheck, likely due to rounding issues + DecorateInfo( + skipIfSlowGradcheckEnv, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.svd", + op=torch.linalg.svd, + aten_name="linalg_svd", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_svd, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.svdvals", + op=torch.linalg.svdvals, + aten_name="linalg_svdvals", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # We're using at::allclose, which does not have a batching rule + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_svdvals, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + 
unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.tensorinv", + ref=np.linalg.tensorinv, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorinv, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.tensorsolve", + ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorsolve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=8e-04, rtol=7e-06)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), +] + +python_ref_db: List[OpInfo] = [ + # + # torch.linalg + # + PythonRefInfo( + "_refs.linalg.cross", + torch_opinfo_name="linalg.cross", + supports_out=True, + op_db=op_db, + skips=( + # TODO: is this really needed? + DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_python_ref_errors" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.diagonal", + torch_opinfo_name="linalg.diagonal", + supports_out=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.vecdot", + torch_opinfo_name="linalg.vecdot", + op_db=op_db, + ), + ReductionPythonRefInfo( + "_refs.linalg.vector_norm", + torch_opinfo_name="linalg.vector_norm", + supports_out=True, + op_db=op_db, + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.matrix_norm", + torch_opinfo_name="linalg.matrix_norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.norm", + torch_opinfo_name="linalg.norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svd", + torch_opinfo_name="linalg.svd", + supports_out=True, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svdvals", + torch_opinfo_name="linalg.svdvals", + supports_out=True, + op_db=op_db, + ), +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py new file mode 100644 index 0000000000000000000000000000000000000000..ea678c2e4f87508f08833c13ce55873032bcb5bf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py 
@@ -0,0 +1,305 @@ +# mypy: ignore-errors + +from copy import copy +from functools import partial + +import torch +from torch.testing._internal.common_methods_invocations import op_db +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + ReductionOpInfo, + SampleInput, + UnaryUfuncInfo, +) +from torch.utils._pytree import tree_map + + +# random integer used for sizes +def _rnd(): + return torch.randint(3, 8, ()).item() + + +def _raggedness_matches(nt1, nt2): + return ( + nt1.is_nested + and nt2.is_nested + and nt1._ragged_idx == nt2._ragged_idx + and nt1.shape[nt1._ragged_idx] == nt2.shape[nt2._ragged_idx] + ) + + +# Generates a random NT. +# dims should be something like [5, None, 10], with None indicating that a +# random ragged structure should be used +def random_nt_from_dims( + dims, device=None, dtype=None, layout=torch.strided, requires_grad=False +): + sizes = [[d if d is not None else _rnd() for d in dims[1:]] for d in range(dims[0])] + return torch.nested.nested_tensor( + [torch.randn(*size) for size in sizes], + device=device, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + ) + + +# Helper function for generating a comprehensive set of NJT sample inputs. +def _sample_njts(device, dtype, requires_grad=False, dims=None): + if dims is None: + dims = [2, 3, 4] + if not isinstance(dims, (list, tuple)): + dims = [dims] + + # contiguous NJTs + for dim in dims: + # with min / max seqlen cached + shape = (_rnd(), None, *[_rnd() for _ in range(dim - 2)]) + nt = random_nt_from_dims( + shape, + device=device, + dtype=dtype, + requires_grad=requires_grad, + layout=torch.jagged, + ) + yield nt + + # without min / max seqlen cached + values = nt.values().clone().detach() + offsets = nt.offsets().clone().detach() + yield torch.nested.nested_tensor_from_jagged(values, offsets) + + # TODO: add non-contiguous NJTs + + +# Computes an unbind-based reference for a given OpInfo on a given SampleInput. +# This reference unbinds the input NJT and invokes the op on each of the components, +# optionally wrapping the result in an NJT. +def unbind_reference(op, sample, wrap_output_as_njt=True): + assert sample.input.is_nested + out_ref_components = [] + for i, component in enumerate(sample.input.unbind(dim=0)): + + def _slice_njts(t, i=i, inp=sample.input): + # any NJT with the same ragged structure as the input should + # also be sliced to pass to the reference + if isinstance(t, torch.Tensor) and _raggedness_matches(t, inp): + return t[i] + else: + return t + + args = tree_map(_slice_njts, sample.args) + kwargs = tree_map(_slice_njts, sample.kwargs) + + from torch._prims_common import canonicalize_dims + + # Need to adjust dim to apply on NJT component + if "dim" in kwargs: + kwargs["dim"] = canonicalize_dims(sample.input.dim(), kwargs["dim"]) - 1 + assert kwargs["dim"] >= 0 + + # TODO: handle this + assert "dims" not in kwargs + + out_ref_component = op.op(component, *args, **kwargs) + + # TODO: handle list / tuple / non-NJT outputs + assert not isinstance(out_ref_component, (list, tuple)) + out_ref_components.append(out_ref_component) + + if wrap_output_as_njt: + return torch.nested.as_nested_tensor(out_ref_components, layout=torch.jagged) + + return out_ref_components + + +# Computes the reference value for a reduction op. 
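+# Illustrative sketch (not part of the original file; the tensors below are
+# only an example): for an NJT with logical shape (B, j1, D), reducing over
+# the ragged dim behaves like unbind-then-stack, e.g.
+#
+#   >>> nt = torch.nested.nested_tensor(
+#   ...     [torch.randn(3, 4), torch.randn(5, 4)], layout=torch.jagged)
+#   >>> ref = torch.stack([c.sum(dim=0) for c in nt.unbind(0)], dim=0)
+#
+# which is what reduction_reference() below computes when dim == _ragged_idx
+# (assuming a sum-like reduction); a full reduction (dim=None) instead runs
+# the op directly on nt.values().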
+def reduction_reference(op, sample): + assert sample.input.is_nested + dim = sample.kwargs.get("dim", None) + keepdim = sample.kwargs.get("keepdim", False) + assert dim != 0, "reductions over the batch dim are not supported" + assert "dims" not in sample.kwargs + assert sample.input._ragged_idx == 1 + + if dim is None: + # calculate reference value by running reduction on values buffer + return op.op(sample.input.values(), *sample.args, **sample.kwargs) + + if dim == sample.input._ragged_idx: + # calculate reference value by running an unbind reference and stacking + out_ref_components = unbind_reference(op, sample, wrap_output_as_njt=False) + return torch.stack(out_ref_components, dim=0) + + # unbind reference works for other reductions + return unbind_reference(op, sample) + + +def sample_inputs_elementwise_njt_unary( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + for njt in _sample_njts( + device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4] + ): + yield SampleInput(njt, kwargs=dict(op_kwargs)) + + +def sample_inputs_elementwise_njt_binary( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + for njt1 in _sample_njts( + device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4] + ): + # TODO: account for non-contiguous NJTs here + # TODO: provide sample inputs for broadcasting cases and mixed (NT, T), (T, NT) inputs + njt2 = torch.randn_like(njt1) + yield SampleInput(njt1, args=(njt2,), kwargs=dict(op_kwargs)) + + +def sample_inputs_njt_reduction( + op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs +): + if not op_kwargs: + op_kwargs = {} + + for njt in _sample_njts( + device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4] + ): + # dim-wise reduction; includes reduction over the ragged dim + # NB: reduction over the batch dim is not supported! + # TODO: Cover this in the set of error inputs + for dim in range(1, njt.dim()): + for keepdim in [False, True]: + yield SampleInput( + njt, kwargs={**op_kwargs, "dim": dim, "keepdim": keepdim} + ) + + # full reduction + yield SampleInput(njt, kwargs=dict(op_kwargs)) + + +def unsupported_sample_inputs_func(op_name): + def _f(op_info, device, dtype, requires_grad, op_name=op_name, **kwargs): + raise RuntimeError( + f"OpInfo for {op_name} does not support NJT. Support can be added by modifying " + "torch/testing/_internal/opinfo/definitions/nested.py." + ) + + return _f + + +def unsupported_reference(op_name): + def _f(op, sample): + raise RuntimeError( + f"OpInfo for {op_name} does not define a ref() function. Support can be added by " + "modifying torch/testing/_internal/opinfo/definitions/nested.py." 
+        )
+
+    return _f
+
+
+# === BEGIN OP-SPECIFIC SAMPLE INPUTS FUNCS ===
+def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
+    # non-contiguous NJTs
+    for njt in _sample_njts(
+        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
+    ):
+        yield SampleInput(njt)
+
+    for memory_format in (torch.contiguous_format, torch.preserve_format):
+        # construct a "non-contiguous with holes" NJT
+        values = torch.randn(
+            10, 5, device=device, dtype=dtype, requires_grad=requires_grad
+        )
+        offsets = torch.tensor([0, 2, 4, 10], device=device, dtype=torch.int64)
+        lengths = torch.tensor([2, 1, 3], device=device, dtype=torch.int64)
+        njt = torch.nested.nested_tensor_from_jagged(
+            values, offsets=offsets, lengths=lengths
+        )
+
+        yield SampleInput(njt, kwargs={"memory_format": memory_format})
+
+
+def sample_inputs_mvl_gamma(p):
+    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"p": p})
+
+
+def sample_inputs_polygamma_n(n):
+    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})
+
+
+def sample_inputs_special_polygamma_n(n):
+    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})
+
+
+def sample_inputs_masked_select(
+    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
+):
+    for njt in _sample_njts(
+        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2]
+    ):
+        yield SampleInput(
+            njt, kwargs={"mask": (torch.randn_like(njt, requires_grad=False) < 0.0)}
+        )
+
+
+sample_inputs_nn_functional_threshold = partial(
+    sample_inputs_elementwise_njt_unary,
+    op_kwargs={"threshold": float.fromhex("0x1.3ap-3"), "value": -9},
+)
+# === END OP-SPECIFIC SAMPLE INPUTS FUNCS ===
+
+
+# Mapping of OpInfo full names -> sample_inputs_funcs, which define the set of sample inputs
+# (involving NJTs) to pass to the op. Full name consists of the OpInfo's name and variant name
+# separated by a period (e.g. special.polygamma.special_polygamma_n_0). These are necessary
+# to specify if they cannot be auto-generated for some reason. Try to keep these sorted
+# in alphabetical order!
+njt_sample_inputs = {
+    "clone": sample_inputs_clone,
+    **{f"mvlgamma.mvlgamma_p_{p}": sample_inputs_mvl_gamma(p=p) for p in (1, 3, 5)},
+    "nn.functional.threshold": sample_inputs_nn_functional_threshold,
+    **{f"polygamma.polygamma_n_{n}": sample_inputs_polygamma_n(n=n) for n in range(5)},
+    "special.polygamma.special_polygamma_n_0": sample_inputs_special_polygamma_n(n=0),
+    "masked_select": sample_inputs_masked_select,
+}
+
+
+# Translates an OpInfo entry to one that operates on NJTs.
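+# Hypothetical usage sketch (not part of the original file): a standard OpInfo
+# is copied and its sample_inputs_func / ref are swapped for NJT-aware ones,
+# e.g.
+#
+#   >>> from torch.testing._internal.common_methods_invocations import op_db
+#   >>> clone_info = next(op for op in op_db if op.full_name == "clone")
+#   >>> njt_clone = translate_opinfo(clone_info)
+#   >>> njt_clone.supports_njt, njt_clone.ref is unbind_reference
+#   (True, True)
+#
+# Ops without a known translation keep supports_njt=False and receive
+# placeholder sample/ref functions that raise when used.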
+def translate_opinfo(op): + new_op = copy(op) + new_op.supports_njt = True + + if op.full_name in njt_sample_inputs: + new_op.sample_inputs_func = njt_sample_inputs[op.full_name] + # TODO: make the reference customizeable + new_op.ref = unbind_reference + elif isinstance(op, UnaryUfuncInfo): + new_op.sample_inputs_func = partial( + sample_inputs_elementwise_njt_unary, op_kwargs=None + ) + new_op.ref = unbind_reference + elif isinstance(op, BinaryUfuncInfo): + new_op.sample_inputs_func = partial( + sample_inputs_elementwise_njt_binary, op_kwargs=None + ) + new_op.ref = unbind_reference + elif isinstance(op, ReductionOpInfo): + new_op.sample_inputs_func = partial(sample_inputs_njt_reduction, op_kwargs=None) + new_op.ref = reduction_reference + # TODO: Translate the rest of the OpInfos + else: + new_op.sample_inputs_func = unsupported_sample_inputs_func(op.full_name) + new_op.ref = unsupported_reference(op.full_name) + new_op.supports_njt = False + + return new_op + + +njt_op_db = [translate_opinfo(op) for op in op_db] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..105590a71fb7d972bcbdfc2bfaaf3f59fbc4e335 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py @@ -0,0 +1,458 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from itertools import product +from typing import Callable, List, Tuple + +import numpy + +import torch +from torch.testing._internal.common_dtype import floating_types +from torch.testing._internal.common_utils import TEST_SCIPY +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + SampleInput, +) + + +if TEST_SCIPY: + import scipy.signal + + +def sample_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs): + r"""Base function used to create sample inputs for windows. + + For additional required args you should use *args, as well as **kwargs for + additional keyword arguments. + """ + + # Tests window sizes up to 5 samples. + for size, sym in product(range(6), (True, False)): + yield SampleInput( + size, + *args, + sym=sym, + device=device, + dtype=dtype, + requires_grad=requires_grad, + **kwargs, + ) + + +def reference_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs): + r"""Reference inputs function to use for windows which have a common signature, i.e., + window size and sym only. + + Implement other special functions for windows that have a specific signature. + See exponential and gaussian windows for instance. 
+ """ + yield from sample_inputs_window( + op_info, device, dtype, requires_grad, *args, **kwargs + ) + + cases = (8, 16, 32, 64, 128, 256) + + for size in cases: + yield SampleInput(size, sym=False) + yield SampleInput(size, sym=True) + + +def reference_inputs_exponential_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"center": 4, "tau": 0.5}), + (16, {"center": 8, "tau": 2.5}), + (32, {"center": 16, "tau": 43.5}), + (64, {"center": 20, "tau": 3.7}), + (128, {"center": 62, "tau": 99}), + (256, {"tau": 10}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + kw["center"] = None + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_gaussian_window(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"std": 0.1}), + (16, {"std": 1.2}), + (32, {"std": 2.1}), + (64, {"std": 3.9}), + (128, {"std": 4.5}), + (256, {"std": 10}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_kaiser_window(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"beta": 2}), + (16, {"beta": 12}), + (32, {"beta": 30}), + (64, {"beta": 35}), + (128, {"beta": 41.2}), + (256, {"beta": 100}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_general_cosine_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"a": [0.5, 0.5]}), + (16, {"a": [0.46, 0.54]}), + (32, {"a": [0.46, 0.23, 0.31]}), + (64, {"a": [0.5]}), + (128, {"a": [0.1, 0.8, 0.05, 0.05]}), + (256, {"a": [0.2, 0.2, 0.2, 0.2, 0.2]}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def reference_inputs_general_hamming_window( + op_info, device, dtype, requires_grad, **kwargs +): + yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs) + + cases = ( + (8, {"alpha": 0.54}), + (16, {"alpha": 0.5}), + (32, {"alpha": 0.23}), + (64, {"alpha": 0.8}), + (128, {"alpha": 0.9}), + (256, {"alpha": 0.05}), + ) + + for size, kw in cases: + yield SampleInput(size, sym=False, **kw) + yield SampleInput(size, sym=True, **kw) + + +def error_inputs_window(op_info, device, *args, **kwargs): + # Tests for windows that have a negative size + yield ErrorInput( + SampleInput(-1, *args, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="requires non-negative window length, got M=-1", + ) + + # Tests for window tensors that are not torch.strided, for instance, torch.sparse_coo. + yield ErrorInput( + SampleInput( + 3, + *args, + layout=torch.sparse_coo, + device=device, + dtype=torch.float32, + **kwargs, + ), + error_type=ValueError, + error_regex="is implemented for strided tensors only, got: torch.sparse_coo", + ) + + # Tests for window tensors that are not floating point dtypes, for instance, torch.long. 
+ yield ErrorInput( + SampleInput(3, *args, dtype=torch.long, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.int64", + ) + + # Tests for window tensors that are bfloat16 + yield ErrorInput( + SampleInput(3, *args, dtype=torch.bfloat16, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.bfloat16", + ) + + # Tests for window tensors that are float16 + yield ErrorInput( + SampleInput(3, *args, dtype=torch.float16, device=device, **kwargs), + error_type=ValueError, + error_regex="expects float32 or float64 dtypes, got: torch.float16", + ) + + +def error_inputs_exponential_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, **kwargs) + + # Tests for negative decay values. + yield ErrorInput( + SampleInput(3, tau=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Tau must be positive, got: -1 instead.", + ) + + # Tests for symmetric windows and a given center value. + yield ErrorInput( + SampleInput(3, center=1, sym=True, dtype=torch.float32, device=device), + error_type=ValueError, + error_regex="Center must be None for symmetric windows", + ) + + +def error_inputs_gaussian_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, std=0.5, **kwargs) + + # Tests for negative standard deviations + yield ErrorInput( + SampleInput(3, std=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Standard deviation must be positive, got: -1 instead.", + ) + + +def error_inputs_kaiser_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, beta=12, **kwargs) + + # Tests for negative beta + yield ErrorInput( + SampleInput(3, beta=-1, dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="beta must be non-negative, got: -1 instead.", + ) + + +def error_inputs_general_cosine_window(op_info, device, **kwargs): + # Yield common error inputs + yield from error_inputs_window(op_info, device, a=[0.54, 0.46], **kwargs) + + # Tests for negative beta + yield ErrorInput( + SampleInput(3, a=None, dtype=torch.float32, device=device, **kwargs), + error_type=TypeError, + error_regex="Coefficients must be a list/tuple", + ) + + yield ErrorInput( + SampleInput(3, a=[], dtype=torch.float32, device=device, **kwargs), + error_type=ValueError, + error_regex="Coefficients cannot be empty", + ) + + +def reference_signal_window(fn: Callable): + r"""Wrapper for scipy signal window references. + + Discards keyword arguments for window reference functions that don't have a matching signature with + torch, e.g., gaussian window. + """ + + def _fn( + *args, + dtype=numpy.float64, + device=None, + layout=torch.strided, + requires_grad=False, + **kwargs, + ): + r"""The unused arguments are defined to disregard those values""" + return fn(*args, **kwargs).astype(dtype) + + return _fn + + +def make_signal_windows_opinfo( + name: str, + ref: Callable, + sample_inputs_func: Callable, + reference_inputs_func: Callable, + error_inputs_func: Callable, + *, + skips: Tuple[DecorateInfo, ...] 
= (), +): + r"""Helper function to create OpInfo objects related to different windows.""" + return OpInfo( + name=name, + ref=ref if TEST_SCIPY else None, + dtypes=floating_types(), + dtypesIfCUDA=floating_types(), + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + error_inputs_func=error_inputs_func, + supports_out=False, + supports_autograd=False, + skips=( + # TODO: same as this? + # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + # fails to match any schemas despite working in the interpreter + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # skip these tests since we have non tensor input + DecorateInfo( + unittest.skip("Skipped!"), "TestCommon", "test_noncontiguous_samples" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_conj_view"), + DecorateInfo( + unittest.skip("Skipped!"), "TestMathBits", "test_neg_conj_view" + ), + DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_neg_view"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_vmap_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestVmapOperatorsOpInfo", + "test_op_has_batch_rule", + ), + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + *skips, + ), + ) + + +op_db: List[OpInfo] = [ + make_signal_windows_opinfo( + name="signal.windows.hamming", + ref=reference_signal_window(scipy.signal.windows.hamming) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.hann", + ref=reference_signal_window(scipy.signal.windows.hann) if TEST_SCIPY else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.bartlett", + ref=reference_signal_window(scipy.signal.windows.bartlett) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.blackman", + ref=reference_signal_window(scipy.signal.windows.blackman) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.cosine", + ref=reference_signal_window(scipy.signal.windows.cosine) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.exponential", + ref=reference_signal_window(scipy.signal.windows.exponential) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, tau=2.78), + reference_inputs_func=partial(reference_inputs_exponential_window, tau=2.78), + error_inputs_func=error_inputs_exponential_window, + ), + 
make_signal_windows_opinfo( + name="signal.windows.gaussian", + ref=reference_signal_window(scipy.signal.windows.gaussian) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, std=1.92), + reference_inputs_func=partial(reference_inputs_gaussian_window, std=1.92), + error_inputs_func=error_inputs_gaussian_window, + skips=( + DecorateInfo( + unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + make_signal_windows_opinfo( + name="signal.windows.kaiser", + ref=reference_signal_window(scipy.signal.windows.kaiser) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, beta=12.0), + reference_inputs_func=partial(reference_inputs_kaiser_window, beta=12.0), + error_inputs_func=error_inputs_kaiser_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_cosine", + ref=reference_signal_window(scipy.signal.windows.general_cosine) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, a=[0.54, 0.46]), + reference_inputs_func=partial( + reference_inputs_general_cosine_window, a=[0.54, 0.46] + ), + error_inputs_func=error_inputs_general_cosine_window, + ), + make_signal_windows_opinfo( + name="signal.windows.general_hamming", + ref=reference_signal_window(scipy.signal.windows.general_hamming) + if TEST_SCIPY + else None, + sample_inputs_func=partial(sample_inputs_window, alpha=0.54), + reference_inputs_func=partial( + reference_inputs_general_hamming_window, alpha=0.54 + ), + error_inputs_func=error_inputs_window, + ), + make_signal_windows_opinfo( + name="signal.windows.nuttall", + ref=reference_signal_window(scipy.signal.windows.nuttall) + if TEST_SCIPY + else None, + sample_inputs_func=sample_inputs_window, + reference_inputs_func=reference_inputs_window, + error_inputs_func=error_inputs_window, + ), +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..3e1f816d9f73fec05fa59ac8b863236242b85f7d --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py @@ -0,0 +1,925 @@ +# mypy: ignore-errors + +import os + +import torch +from torch.testing import make_tensor # noqa: F401 +from torch.testing._internal.opinfo.core import ( # noqa: F401 + BinaryUfuncInfo, + ErrorInput, + generate_elementwise_binary_tensors, + ReductionOpInfo, + sample_inputs_reduction, + SampleInput, +) + + +def _check_validate(op_info, sample): + def _check_fail(sample): + try: + op_info( + sample.sample_input.input, + *sample.sample_input.args, + **sample.sample_input.kwargs, + ) + except sample.error_type: + pass + except Exception as msg: + raise AssertionError( # noqa: B904 + f"{op_info.name} on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}" + ) + else: + raise AssertionError( + f"{op_info.name} on {sample.sample_input=} expected exception " + f"{sample.error_type}: {sample.error_regex}, got none." 
+ ) + + def _check_success(sample): + try: + op_info(sample.input, *sample.args, **sample.kwargs) + except Exception as msg: + raise AssertionError( # noqa: B904 + f"{op_info.name} on {sample=} expected to succeed " + f", got {type(msg).__name__}: {msg}" + ) + + if isinstance(sample, ErrorInput): + _check_fail(sample) + else: + _check_success(sample) + + +def _sample_inputs_sparse( + sample_inputs, + maybe_failing_sample_inputs, + validate_sample_input, + op_info, + *args, + **kwargs, +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + # Error inputs are handled in error_inputs_sparse + + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, SampleInput): + yield sample + + +def _error_inputs_sparse( + maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs +): + check_validate = ( + os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1" + ) + for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs): + sample = validate_sample_input(op_info, sample, check_validate=check_validate) + if isinstance(sample, ErrorInput): + yield sample + # Sample inputs are handled in sample_inputs_sparse + + +def _apply_requires_grad_to_samples(sample_inputs): + """Decorator to _maybe_failing_sample_inputs_... generator functions + that clones and sets requires_grad argument to tensors in sample + input arguments. This is needed when the generated samples share + tensor instances. + """ + + def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs): + def apply_requires_grad(x): + if ( + not isinstance(x, torch.Tensor) + or x.requires_grad + or not requires_grad + or not (x.is_floating_point() or x.is_complex()) + ): + return x + return x.detach().clone().requires_grad_(requires_grad) + + if requires_grad: + for sample_input in sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ): + yield sample_input.transform(apply_requires_grad) + else: + yield from sample_inputs( + op_info, device, dtype, requires_grad, layout, **kwargs + ) + + return wrapper + + +def sample_inputs_sparse_reduction( + op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs +): + """Sample inputs for reduction operations on sparse tensors.""" + layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0] + op_supports_layout = getattr(op_info, "supports_" + layout_name) + if not op_supports_layout: + return + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if sample_input.input.ndim == 0: + # scalar sparse tensors are not supported + continue + + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if sample_input.input.ndim < 2: + # conversion to sparse compressed tensors requires at + # least 2 dimensional tensors + continue + if sample_input.input.ndim > 2 and (sample_input.input == 0).any(): + # Skip batched sparse compressed samples that contain + # explicit zeros because to_sparse(layout=..) will + # fail, see gh-98495. + # TODO: remove this if-block after gh-98495 is fixed. 
+ continue + + if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None: + blocksize = (1, 1) + + yield SampleInput( + sample_input.input.detach() + .to_sparse(layout=layout, blocksize=blocksize) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex): + # uncoalesced samples + inp = sample_input.input.detach().to_sparse(layout=layout) + inp = torch.sparse_coo_tensor( + inp.indices().repeat(1, 2), + inp.values().repeat(2), + inp.shape, + dtype=inp.dtype, + device=inp.device, + ) + assert not inp.is_coalesced() + yield SampleInput( + inp.requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + if sample_input.input.ndim > 2: + # hybrid samples + yield SampleInput( + sample_input.input.detach() + .to_sparse( + layout=layout, + blocksize=blocksize, + dense_dim=sample_input.input.ndim - 2, + ) + .requires_grad_(requires_grad), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False): + """Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. + """ + UNSPECIFIED = object() + if op_info.name == "sum": + sample = _validate_sample_input_sparse_reduction_sum(sample) + + if op_info.name in {"masked.sum"}: + mask = sample.kwargs.get("mask", UNSPECIFIED) + if ( + mask not in {None, UNSPECIFIED} + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. + sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif mask is UNSPECIFIED: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + + if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}: + t_inp = sample.input + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + mask = sample.kwargs.get("mask") + if ( + mask is not None + and mask.ndim > 2 + and mask.layout is torch.strided + and (mask == 0).any() + ): + # TODO: remove this if-block after gh-98495 is fixed. 
+ sample = ErrorInput( + sample, + error_regex="Expect the same number of specified elements per batch.", + ) + elif mask is None: + sample = ErrorInput( + sample, + error_type=ValueError, + error_regex="masked (.*) expects explicit mask for sparse_csr tensor input", + ) + elif ( + mask.layout is sample.input.layout + and mask.ndim > 2 + and op_info.name == "masked.mean" + ): + sample = ErrorInput( + sample, + error_type=TypeError, + error_regex=( + "where[(][)] received an invalid combination of arguments" + " - got [(]Tensor, Tensor, NoneType[)]" + ), + ) + elif not sample.kwargs.get("keepdim"): + sample = ErrorInput( + sample, + error_type=(AssertionError, RuntimeError), + error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported", + ) + elif ( + sample.input.ndim > 2 + and (sample.kwargs.get("dim") not in {0, 1}) + and mask.ndim > 2 + and mask.layout is not torch.strided + ): + if sample.kwargs.get("dim") == (0, -1): + sample = ErrorInput( + sample, + error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities", + ) + elif op_info.name == "masked.prod": + sample = ErrorInput( + sample, + error_regex="input_dim == 2 INTERNAL ASSERT FAILED at", + ) + else: + sample = ErrorInput( + sample, + error_type=AssertionError, + error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.", + ) + elif sample.input.ndim > 2: + sample = ErrorInput( + sample, + error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.", + ) + elif ( + mask.layout is t_inp.layout + and mask._nnz() != t_inp._nnz() + and t_inp.dense_dim() > 0 + ): + sample = ErrorInput( + sample, + error_regex="Index tensor must have the same number of dimensions as src tensor", + ) + + if check_validate: + _check_validate(op_info, sample) + + return sample + + +def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + dim = t_kwargs.get("dim") + keepdim = t_kwargs.get("keepdim") + layout = t_inp.layout + if isinstance(dim, (int, list, tuple)): + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}: + return ErrorInput( + sample, + error_regex=( + "Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout" + ), + ) + if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim: + return ErrorInput( + sample, + error_regex=( + "reduction operations on CSR tensors with keepdim=False is unsupported" + ), + ) + if t_inp.dim() != 2: + return ErrorInput( + sample, + error_regex=("input_dim == 2 INTERNAL ASSERT"), + ) + if layout == torch.sparse_csr: + if t_inp.dtype == torch.bool: + return ErrorInput( + sample, + error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"), + ) + if t_inp.dtype == torch.complex32: + return ErrorInput( + sample, + error_regex=( + "_sparse_csr_sum_cuda not implemented for 'ComplexHalf'" + ), + ) + return sample + + +def _maybe_failing_sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. 
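+    # Illustrative example (assumed equivalence, not part of the original file):
+    # the first CSR sample yielded below corresponds roughly to
+    #
+    #   >>> t = torch.tensor([[0., 1.], [2., 3.]]).to_sparse_csr()
+    #   >>> torch.sum(t, dim=0, keepdim=True)
+    #
+    # which, per the comment below, used to fail with a NotImplementedError on
+    # the SparseCsrCPU backend.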
+ if layout in [ + torch.sparse_csr, + torch.sparse_csc, + ]: + # NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend. + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + if layout in [ + torch.sparse_bsr, + torch.sparse_bsc, + ]: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(2, 2)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + +def sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for sum on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_reduction, + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs): + """Error inputs for sum on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def sample_inputs_sparse_elementwise_binary_operation( + op_info, device, dtype, 
requires_grad, layout, **kwargs +): + """Sample inputs for elementwise binary operations on sparse tensors. + + The samples include regular, zero-sized, batched, and hybrid + sparse tensors as well as rhs scalars. All tensors are full tensors. + """ + + def _to_sparse(tensor, **kwargs): + return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad) + + for sample_input in generate_elementwise_binary_tensors( + op_info, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=True, + **kwargs, + ): + lhs, rhs = sample_input.input, sample_input.args[0] + min_dense_dim = 0 + max_dense_dim = lhs.ndim - 1 + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if lhs.ndim < 2: + # sparse compressed tensors sparse_dim must be 2 + continue + max_dense_dim = lhs.ndim - 2 + + for dense_dim in range(min_dense_dim, max_dense_dim + 1): + if layout in {torch.sparse_bsr, torch.sparse_bsc}: + blocksizes = [(1, 1)] + if lhs.numel() > 0: + blocksizes.append( + ( + lhs.shape[lhs.ndim - 2 - dense_dim], + lhs.shape[lhs.ndim - 1 - dense_dim], + ) + ) + else: + blocksizes = [None] + for blocksize in blocksizes: + to_sparse_kwargs = dict( + layout=layout, dense_dim=dense_dim, blocksize=blocksize + ) + lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs) + rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs) + # op(sparse, sparse) + yield SampleInput( + lhs_sparse, + args=(rhs_sparse, *sample_input.args[1:]), + kwargs=sample_input.kwargs, + ) + # op(sparse, scalar) + yield SampleInput( + lhs_sparse, + args=( + make_tensor( + (), dtype=dtype, device=device, requires_grad=requires_grad + ), + *sample_input.args[1:], + ), + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_elementwise_binary_sparse_mul(sample): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + layout = t_inp.layout + dtype = t_inp.dtype + if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex=( + "coo_to_sparse_csr: conversion from Sparse to SparseCsr for input" + " tensors with sparse_dim[(][)]!=2 is not supported" + ), + ) + elif layout is torch.sparse_csc and t_args[0].ndim > 0: + return ErrorInput( + sample, error_regex="Expected result Tensor to be of format CSR" + ) + elif layout is torch.sparse_bsr and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr", + ) + elif layout is torch.sparse_bsc and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc", + ) + elif ( + layout is torch.sparse_coo + and dtype is torch.bool + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + and t_inp.dense_dim() > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'" + ) + elif ( + layout in {torch.sparse_coo, torch.sparse_csr} + and dtype is torch.bool + and t_inp._nnz() > 0 + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + ): + return ErrorInput( + sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'" + ) + elif ( + layout is torch.sparse_csr + and t_args[0].layout is torch.strided + and 0 < t_args[0].ndim + and t_args[0].ndim < 
t_inp.ndim + ): + return ErrorInput( + sample, error_regex="sparse_mask_sparse_csr expects self to be 2D" + ) + elif layout is torch.sparse_csr and ( + (t_args[0].layout is torch.strided and 0 < t_args[0].ndim) + or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape) + ): + return ErrorInput( + sample, + error_regex=( + "expects sparse inputs with equal dimensionality, number of sparse dimensions," + " and shape of sparse dimensions" + ), + ) + elif ( + layout is torch.sparse_csr + and t_inp.dense_dim() > 0 + and t_inp._nnz() > 0 + and t_inp.is_cpu + and dtype is torch.float16 + and t_args[0].ndim > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Half'" + ) + return sample + + +@_apply_requires_grad_to_samples +def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse( + layout=layout, dense_dim=0, blocksize=blocksize + ) + batch = torch.tensor( + [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize) + hybrid = torch.tensor( + [[[1], [2]], [[3], [4]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize) + + if layout is torch.sparse_csr: + # RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor + yield SampleInput(batch, args=(batch,)) + # RuntimeError: Only tensors with two sparse dimensions can be + # converted to the SparseCsr layout, got self with 3 sparse + # dimensions. 
+ yield SampleInput( + torch.zeros_like(hybrid).requires_grad_(requires_grad), + args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),), + ) + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_csc: + # RuntimeError: Expected result Tensor to be of format CSR + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsr: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsc: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_coo: + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if dtype in {torch.bool, torch.float16} and regular.is_cpu: + # RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)' + yield SampleInput(hybrid, args=(hybrid,)) + + +def _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=False +): + if op_info.name == "mul": + sample = _validate_sample_input_elementwise_binary_sparse_mul(sample) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs): + """Sample inputs for mul operation on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_elementwise_binary_operation, + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_mul(op_info, device, layout, **kwargs): + """Error inputs for mul operation on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + from torch.testing._internal.common_utils import TestCase + + for tensor in TestCase().generate_simple_inputs( + layout, + device=device, + dtype=dtype, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=False, + enable_non_contiguous_values=False, + ): + yield SampleInput(tensor, args=(), kwargs={}) + yield SampleInput( + tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout) + ) + + if dtype is not torch.float64: + yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64)) + + if torch.cuda.is_available(): + other_device = "cuda" if tensor.device.type == "cpu" else "cpu" + yield SampleInput(tensor, args=(), kwargs=dict(device=other_device)) + + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + 
other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout)) + + if layout is not torch.sparse_coo: + yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo)) + + +def _validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False): + if sample.input.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + } and op_info.name not in {"zeros_like"}: + if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout: + return ErrorInput( + sample, + error_regex=( + "empty_like with different sparse layout is not supported" + " \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)" + ), + ) + if sample.input.layout is torch.sparse_coo: + return ErrorInput( + sample, + error_regex=( + "Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend." + ), + ) + if check_validate: + _check_validate(op_info, sample) + return sample + + +def _maybe_failing_sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + if torch.cuda.is_available() and layout is not torch.sparse_coo: + other_device = "cuda" if torch.device(device).type == "cpu" else "cpu" + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(device=other_device), + ) + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(layout=other_layout), + ) + + +def sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for like-functions on sparse tensors.""" + yield from _sample_inputs_sparse( + _sample_inputs_sparse_like_fns, + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs): + """Error inputs for like-functions on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _validate_sample_input_sparse_default(op_info, sample, check_validate=False): + if op_info.name == "to_sparse": + if ( + sample.input.layout + in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + and len(sample.args) == 1 + and isinstance(sample.args[0], int) + and sample.args[0] != 2 + ): + sample = ErrorInput( + sample, + error_regex="sparse dim argument must be 2 for sparse_compressed_to_sparse", + ) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def validate_sample_input_sparse(op_info, sample, check_validate=False): + 
"""Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. + """ + if isinstance(op_info, ReductionOpInfo): + return _validate_sample_input_sparse_reduction( + op_info, sample, check_validate=check_validate + ) + elif isinstance(op_info, BinaryUfuncInfo): + return _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=check_validate + ) + else: + return _validate_sample_input_sparse_default( + op_info, sample, check_validate=check_validate + ) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py new file mode 100644 index 0000000000000000000000000000000000000000..5b137799db8e57b211fe6446bee58dcba24dfd07 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py @@ -0,0 +1,843 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from itertools import product +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + precisionOverride, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import all_types_and, floating_types +from torch.testing._internal.common_utils import ( + TEST_SCIPY, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, +) +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + DecorateInfo, + L, + NumericsFilter, + OpInfo, + S, + SampleInput, + UnaryUfuncInfo, +) +from torch.testing._internal.opinfo.refs import ( + ElementwiseBinaryPythonRefInfo, + ElementwiseUnaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, +) + + +if TEST_SCIPY: + import scipy.special + + +# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`, +# supports `exclude` argument. 
+# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617 +def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs): + exclude_zero = requires_grad and op_info.op == torch.special.i0e + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield SampleInput(make_arg((S,))) + yield SampleInput(make_arg(())) + + if requires_grad and not exclude_zero: + # Special Case for gradient + # Sample with `0` in the input + t = make_arg((S,)) + t[0] = 0 + + yield SampleInput(t) + + +def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, + device=device, + # TODO: eliminate low after gh-106692 is fixed: + low=(1 if dtype in {torch.int32, torch.int64} else None), + dtype=dtype, + requires_grad=requires_grad, + ) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + for shape, n in product(tensor_shapes, ns): + yield SampleInput(make_arg(shape), args=(n,)) + + +def reference_polygamma(x, n): + # WEIRD `scipy.special.polygamma` behavior + # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype + # dtype('float64') + # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype + # dtype('float32') + # + # Thus we cast output to the default torch dtype or preserve double + result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + if x.dtype == np.double: + result_dtype = np.double + return scipy.special.polygamma(n, x).astype(result_dtype) + + +def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs): + low, _ = op_info.domain + + if requires_grad: + low = 0 + op_info._domain_eps + + make_arg = partial( + make_tensor, dtype=dtype, device=device, low=low, requires_grad=requires_grad + ) + yield SampleInput(make_arg((L,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_erfcx(op_info, device, dtype, requires_grad, **kwargs): + for shape in ((L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=-5, + requires_grad=requires_grad, + ), + ) + + +op_db: List[OpInfo] = [ + UnaryUfuncInfo( + "special.i0e", + aten_name="special_i0e", + ref=scipy.special.i0e if TEST_SCIPY else None, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.i1", + aten_name="special_i1", + ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) + if TEST_SCIPY + else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + ), + UnaryUfuncInfo( + "special.i1e", + aten_name="special_i1e", + ref=scipy.special.i1e if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + 
"special.ndtr", + aten_name="special_ndtr", + decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),), + ref=scipy.special.ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatch stub: unsupported device typemeta + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="meta", + ), + ), + ), + # A separate OpInfo entry for special.polygamma is needed to reorder the arguments + # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939 + UnaryUfuncInfo( + "special.polygamma", + op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs), + variant_test_name="special_polygamma_n_0", + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # lambda impl + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + sample_kwargs=lambda device, dtype, input: ({"n": 0}, {"n": 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1 + ), + ), + BinaryUfuncInfo( + "special.xlog1py", + aten_name="special_xlog1py", + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test -1 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=-0.99), + ), + BinaryUfuncInfo( + "special.zeta", + aten_name="special_zeta", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + # TODO: FIXME + # OpInfo entry to verify the gradient formula of `other`/`q` + # BinaryUfuncInfo('special.zeta', + # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), + # aten_name='special_zeta', + # variant_test_name='grad', + # dtypes=all_types_and(torch.bool), + # promotes_int_to_float=True, + # supports_autograd=True, + # supports_rhs_python_scalar=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable") + # ], + # skips=( + # # Lambda doesn't work in JIT test + # # AssertionError: JIT Test does not execute any logic + # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), + # )), + UnaryUfuncInfo( + "special.entr", + ref=scipy.special.entr if TEST_SCIPY else None, + aten_name="special_entr", + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + 
"test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_entr, + ), + UnaryUfuncInfo( + "special.ndtri", + ref=scipy.special.ndtri if TEST_SCIPY else None, + domain=(0, 1), + aten_name="special_ndtri", + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.log_ndtr", + aten_name="special_log_ndtr", + ref=scipy.special.log_ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.erfcx", + ref=scipy.special.erfcx if TEST_SCIPY else None, + aten_name="special_erfcx", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_erfcx, + ), + UnaryUfuncInfo( + "special.airy_ai", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.airy(x)[0] if TEST_SCIPY else None, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + ), + ), + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y1 if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, 
#79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_h", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + # Greatest absolute difference: inf + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + DecorateInfo(unittest.skip("Hangs on ROCm 6.1"), active_if=TEST_WITH_ROCM), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_he", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.laguerre_polynomial_l", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.legendre_polynomial_p", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k1", + decorators=( + precisionOverride( + { + torch.float32: 
1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0e if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k1", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1e if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.spherical_bessel_j0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.spherical_jn(0, x) if TEST_SCIPY else None, + supports_autograd=False, + ), +] + +python_ref_db: List[OpInfo] = [ + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + 
"_refs.special.bessel_j0", + torch_opinfo_name="special.bessel_j0", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j1", + torch_opinfo_name="special.bessel_j1", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.entr", + torch_opinfo_name="special.entr", + op_db=op_db, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.erfcx", + torch_opinfo_name="special.erfcx", + op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i0e", + torch_opinfo_name="special.i0e", + op_db=op_db, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1", + torch_opinfo_name="special.i1", + op_db=op_db, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1e", + torch_opinfo_name="special.i1e", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.log_ndtr", + torch_opinfo_name="special.log_ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtr", + torch_opinfo_name="special.ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtri", + torch_opinfo_name="special.ndtri", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.spherical_bessel_j0", + torch_opinfo_name="special.spherical_bessel_j0", + op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.zeta", + torch_opinfo_name="special.zeta", + supports_one_python_scalar=True, + op_db=op_db, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), +] diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..41973dc2c0518a55bb8e3ac664303fec1ce481f9 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py @@ -0,0 +1,273 @@ +# mypy: ignore-errors + +import collections +import warnings +from functools import partial, wraps +from typing import Sequence + +import numpy as np + +import torch +from torch.testing._internal.common_cuda import TEST_CUDA +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + all_types, + all_types_and, + all_types_and_complex, + all_types_and_complex_and, + all_types_and_half, + complex_types, + 
floating_and_complex_types, + floating_and_complex_types_and, + floating_types, + floating_types_and, + floating_types_and_half, + integral_types, + integral_types_and, +) +from torch.testing._internal.common_utils import torch_to_numpy_dtype_dict + + +COMPLETE_DTYPES_DISPATCH = ( + all_types, + all_types_and_complex, + all_types_and_half, + floating_types, + floating_and_complex_types, + floating_types_and_half, + integral_types, + complex_types, +) + +EXTENSIBLE_DTYPE_DISPATCH = ( + all_types_and_complex_and, + floating_types_and, + floating_and_complex_types_and, + integral_types_and, + all_types_and, +) + +# Better way to acquire devices? +DEVICES = ["cpu"] + (["cuda"] if TEST_CUDA else []) + + +class _dynamic_dispatch_dtypes(_dispatch_dtypes): + # Class to tag the dynamically generated types. + pass + + +def get_supported_dtypes(op, sample_inputs_fn, device_type): + # Returns the supported dtypes for the given operator and device_type pair. + assert device_type in ["cpu", "cuda"] + if not TEST_CUDA and device_type == "cuda": + warnings.warn( + "WARNING: CUDA is not available, empty_dtypes dispatch will be returned!" + ) + return _dynamic_dispatch_dtypes(()) + + supported_dtypes = set() + for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half): + try: + samples = sample_inputs_fn(op, device_type, dtype, False) + except RuntimeError: + # If `sample_inputs_fn` doesn't support sampling for a given + # `dtype`, we assume that the `dtype` is not supported. + # We raise a warning, so that user knows that this was the case + # and can investigate if there was an issue with the `sample_inputs_fn`. + warnings.warn( + f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}" + ) + continue + + # We assume the dtype is supported + # only if all samples pass for the given dtype. + supported = True + for sample in samples: + try: + op(sample.input, *sample.args, **sample.kwargs) + except RuntimeError as re: + # dtype is not supported + supported = False + break + + if supported: + supported_dtypes.add(dtype) + + return _dynamic_dispatch_dtypes(supported_dtypes) + + +def dtypes_dispatch_hint(dtypes): + # Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH) + # and its string representation for the passed `dtypes`. + return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str") + + # CUDA is not available, dtypes will be empty. + if len(dtypes) == 0: + return return_type((), "()") + + set_dtypes = set(dtypes) + for dispatch in COMPLETE_DTYPES_DISPATCH: + # Short circuit if we get an exact match. + if set(dispatch()) == set_dtypes: + return return_type(dispatch, dispatch.__name__ + "()") + + chosen_dispatch = None + chosen_dispatch_score = 0.0 + for dispatch in EXTENSIBLE_DTYPE_DISPATCH: + dispatch_dtypes = set(dispatch()) + if not dispatch_dtypes.issubset(set_dtypes): + continue + + score = len(dispatch_dtypes) + if score > chosen_dispatch_score: + chosen_dispatch_score = score + chosen_dispatch = dispatch + + # If user passed dtypes which are lower than the lowest + # dispatch type available (not likely but possible in code path). + if chosen_dispatch is None: + return return_type((), str(dtypes)) + + return return_type( + partial(dispatch, *tuple(set(dtypes) - set(dispatch()))), + dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))), + ) + + +def is_dynamic_dtype_set(op): + # Detect if the OpInfo entry acquired dtypes dynamically + # using `get_supported_dtypes`. 
+ return op.dynamic_dtypes + + +def str_format_dynamic_dtype(op): + fmt_str = f""" + OpInfo({op.name}, + dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str}, + dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str}, + ) + """ + + return fmt_str + + +def np_unary_ufunc_integer_promotion_wrapper(fn): + # Wrapper that passes PyTorch's default scalar + # type as an argument to the wrapped NumPy + # unary ufunc when given an integer input. + # This mimics PyTorch's integer->floating point + # type promotion. + # + # This is necessary when NumPy promotes + # integer types to double, since PyTorch promotes + # integer types to the default scalar type. + + # Helper to determine if promotion is needed + def is_integral(dtype): + return dtype in [ + np.bool_, + bool, + np.uint8, + np.int8, + np.int16, + np.int32, + np.int64, + ] + + @wraps(fn) + def wrapped_fn(x): + # As the default dtype can change, acquire it when the function is called. + # NOTE: Promotion in PyTorch is from integer types to the default dtype + np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + + if is_integral(x.dtype): + return fn(x.astype(np_dtype)) + return fn(x) + + return wrapped_fn + + +def reference_reduction_numpy(f, supports_keepdims=True): + """Wraps a NumPy reduction operator. + + The wrapper function will forward dim, keepdim, mask, and identity + kwargs to the wrapped function as the NumPy equivalent axis, + keepdims, where, and initial kwargs, respectively. + + Args: + f: NumPy reduction operator to wrap + supports_keepdims (bool, optional): Whether the NumPy operator accepts + the keepdims parameter. If it does not, the wrapper will manually unsqueeze + the reduced dimensions if it was called with keepdim=True. Defaults to True. + + Returns: + Wrapped function + + """ + + @wraps(f) + def wrapper(x: np.ndarray, *args, **kwargs): + # Copy keys into a set + keys = set(kwargs.keys()) + + dim = kwargs.pop("dim", None) + keepdim = kwargs.pop("keepdim", False) + + if "dim" in keys: + dim = tuple(dim) if isinstance(dim, Sequence) else dim + + # NumPy reductions don't accept dim=0 for scalar inputs + # so we convert it to None if and only if dim is equivalent + if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: + kwargs["axis"] = None + else: + kwargs["axis"] = dim + + if "keepdim" in keys and supports_keepdims: + kwargs["keepdims"] = keepdim + + if "mask" in keys: + mask = kwargs.pop("mask") + if mask is not None: + assert mask.layout == torch.strided + kwargs["where"] = mask.cpu().numpy() + + if "identity" in keys: + identity = kwargs.pop("identity") + if identity is not None: + if identity.dtype is torch.bfloat16: + identity = identity.cpu().to(torch.float32) + else: + identity = identity.cpu() + kwargs["initial"] = identity.numpy() + + result = f(x, *args, **kwargs) + + # Unsqueeze reduced dimensions if NumPy does not support keepdims + if keepdim and not supports_keepdims and x.ndim > 0: + dim = list(range(x.ndim)) if dim is None else dim + result = np.expand_dims(result, dim) + + return result + + return wrapper + + +def prod_numpy(a, *args, **kwargs): + """ + The function calls np.prod with dtype np.int64 if the input is a signed integer + type, or np.uint64 if it is unsigned. This is necessary because on Windows np.prod + defaults to int32 while on Linux it uses int64. 
+ This works around the integer overflow reported in https://github.com/pytorch/pytorch/issues/77320 + + Returns: + np.prod of input + """ + if "dtype" not in kwargs: + if np.issubdtype(a.dtype, np.signedinteger): + a = a.astype(np.int64) + elif np.issubdtype(a.dtype, np.unsignedinteger): + a = a.astype(np.uint64) + + fn = reference_reduction_numpy(np.prod) + return fn(a, *args, **kwargs)
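
For reference, here is a minimal, self-contained sketch of how the NumPy reference helpers added above are intended to be used. It assumes `numpy` is installed, that this file is importable as `torch.testing._internal.opinfo.utils` (the path added by the diff), and that PyTorch's default dtype is the stock `torch.float32`; the asserts describe expected behavior, not a guaranteed public API.

import numpy as np

from torch.testing._internal.opinfo.utils import (
    np_unary_ufunc_integer_promotion_wrapper,
    prod_numpy,
    reference_reduction_numpy,
)

# reference_reduction_numpy translates torch-style kwargs into their NumPy
# equivalents: dim -> axis and keepdim -> keepdims.
ref_sum = reference_reduction_numpy(np.sum)
x = np.arange(6, dtype=np.float32).reshape(2, 3)
out = ref_sum(x, dim=1, keepdim=True)  # effectively np.sum(x, axis=1, keepdims=True)
assert out.shape == (2, 1)

# prod_numpy upcasts integer inputs to 64 bits before reducing, so the
# reference value is the same on platforms where np.prod defaults to int32.
big = np.full((4,), 10_000, dtype=np.int32)
assert prod_numpy(big) == 10_000 ** 4  # 1e16, which would overflow int32

# np_unary_ufunc_integer_promotion_wrapper casts integer inputs to torch's
# default dtype before calling the NumPy ufunc, mirroring PyTorch's
# integer -> floating-point promotion (float32 with the default dtype).
ref_exp = np_unary_ufunc_integer_promotion_wrapper(np.exp)
assert ref_exp(np.array([0, 1, 2], dtype=np.int32)).dtype == np.float32

This mirrors how `np_unary_ufunc_integer_promotion_wrapper` is already applied to `scipy.special.i1` in the `special.py` OpInfo entries earlier in this patch.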