Dataset schema (column, feature type, value summary):

  library          stringclasses   1 value
  test_file        stringclasses   785 values
  test_function    stringlengths   1 to 295
  before           stringlengths   0 to 448k
  after            stringlengths   0 to 487k
  context_before   stringclasses   947 values
  context_after    stringlengths   0 to 16.3k
  commit_before    stringclasses   1 value
  commit_after     stringclasses   1 value
  change_type      stringclasses   3 values
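The rows that follow pair a "before" and an "after" version of a PyTorch test function, keyed by test_file, test_function, the two commit hashes, and a change_type that takes one of three values (the rows shown here are either added or modified). As a minimal sketch of how data with this schema might be loaded and inspected, assuming it is published as a Hugging Face dataset (the repository id "user/pytorch-test-changes" below is a placeholder, not the real name):

# Minimal sketch, assuming the rows are published as a Hugging Face dataset.
# "user/pytorch-test-changes" is a placeholder id, not the real repository name.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/pytorch-test-changes", split="train")

# How are rows distributed across the three change types?
print(Counter(row["change_type"] for row in ds))

# Look at one row: the before/after columns hold the full test body as text.
row = ds[0]
print(row["test_file"], row["test_function"], row["change_type"])
print(row["after"][:200])

The same loop works for filtering, for example keeping only rows whose change_type is "modified" when the goal is to study edits rather than newly added tests.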
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
grad_with_create_graph
def grad_with_create_graph(mod, x, target):
    y = mod(x, target)
    # Set create_graph=True to ensure that the sequence_nr
    # for backward ops continues to count down.
    (gx,) = torch.autograd.grad(
        y[0], x, create_graph=True, grad_outputs=grad_output
    )
    return gx

x = torch.rand(100, 16, 32, 32, requires_grad=True)
target = torch.rand(1)
mod = Model()
args = [mod, x, target]
grad_output = torch.tensor(1.0, requires_grad=True)
compiled_f1 = torch.compile(backend="aot_eager")(grad_with_create_graph)
model_instance = compiled_f1

with profile(
    activities=[torch.profiler.ProfilerActivity.CPU],
    record_shapes=True,
) as kineto_prof:
    res = model_instance(*args)

bwd_set = set()
prof_str = "SeqNr|Thread|FwdThread|Name\n"
for event in kineto_prof.events():
    if event.sequence_nr >= 0:
        prof_str = (
            prof_str + f"{event.sequence_nr}|{event.thread}"
            f"|{event.fwd_thread}|{event.name}|\n"
        )
        if re.search(r"Backward[01]", event.name):
            bwd_set.add(event.sequence_nr)
self.assertTrue(len(bwd_set), 13)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_aot_grad_mode_mutation
def test_aot_grad_mode_mutation(self):
    for compiler in ["aot_eager", "inductor"]:

        def f(x):
            y = x * x
            torch.set_grad_enabled(False)
            return y.clone(), y

        f_compiled = torch.compile(f, backend=compiler, fullgraph=True)

        torch.set_grad_enabled(True)
        x = torch.ones(3, requires_grad=True) * 3
        y_ref = f(x)
        self.assertEqual(torch.is_grad_enabled(), False)
        torch.set_grad_enabled(True)
        y = f_compiled(x)
        self.assertEqual(torch.is_grad_enabled(), False)
        torch.set_grad_enabled(True)

        self.assertEqual(y_ref, y)
        self.assertIsNone(y_ref[0].grad_fn)
        self.assertIsNone(y[0].grad_fn)
        self.assertIsNotNone(y_ref[1].grad_fn)
        self.assertIsNotNone(y[1].grad_fn)

        # Check that the grad computed for the inputs, given the input, is the same
        # The tangent to `y[0]`, which has grad_required=False, is irrelevant
        self.assertEqual(
            sum(y_ref[1].grad_fn(torch.tensor([-1.0, 2.0, 0.0]))),
            sum(
                x
                for x in y[1].grad_fn.apply(None, torch.tensor([-1.0, 2.0, 0.0]))
                if x is not None
            ),
        )
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_aot_autograd_raises_invalid_leaf_set
def test_aot_autograd_raises_invalid_leaf_set(self):
    @torch.compile
    def f(x):
        x.set_(torch.ones(2))

    # We still want to make sure that this raises
    x = torch.ones(2, requires_grad=True)
    with self.assertRaisesRegex(
        RuntimeError, "is being used in an in-place operation"
    ):
        f(x)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
fn
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)

with self.assertRaisesRegex(
    RuntimeError,
    "a leaf Variable that requires grad is being used in an in-place operation.",
):
    aot_fn(x, y)
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)
# This should not error: we mutated an autograd leaf under no_grad mode.
aot_fn(x, y)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
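The row above has change_type "modified", so both the before and after fields are populated; the edit itself is not stored and has to be reconstructed by diffing the two strings. A small sketch with Python's standard difflib, assuming row is one record with the columns listed at the top of this section:

import difflib

def row_to_unified_diff(row):
    # Rebuild a conventional unified diff from one "modified" record.
    # `row` is assumed to be a dict keyed by the column names above.
    diff = difflib.unified_diff(
        row["before"].splitlines(keepends=True),
        row["after"].splitlines(keepends=True),
        fromfile=f"a/{row['test_file']}",
        tofile=f"b/{row['test_file']}",
    )
    return "".join(diff)

For the pair above, the resulting hunk would show only the tail changing: the assertRaisesRegex context manager is dropped and aot_fn(x, y) is expected to run without raising.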
torch
test/dynamo/test_aot_autograd.py
test_data_ptr_access_copy
def test_data_ptr_access_copy(self):
    import torch._functorch.config as _config

    with _config.patch(fake_tensor_allow_unsafe_data_ptr_access=False):
        with FakeTensorMode():
            x = torch.randn(3)
            y = copy.copy(x)
    self.assertEqual(y.shape, x.shape)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_data_ptr_access_fails_in_forward
def test_data_ptr_access_fails_in_forward(self):
    with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
        torch.library.define("mylib::foo", "(Tensor x) -> Tensor", lib=lib)

        @torch.library.impl("mylib::foo", "CompositeImplicitAutograd", lib=lib)
        def _(x):
            x.data_ptr()
            return x.clone()

        x = torch.randn(3)

        def data_ptr_graph_input(x):
            r0 = torch.ops.mylib.foo(x)
            return r0

        def data_ptr_graph_intermediate(x):
            y = x.clone()
            r0 = torch.ops.mylib.foo(y)
            return r0

        tests = [data_ptr_graph_input, data_ptr_graph_intermediate]

        def ctx():
            return self.assertRaisesRegex(
                RuntimeError, "Cannot access data pointer"
            )

        for f in tests:
            with ctx():
                make_fx(f, tracing_mode="fake")(x)
            with ctx():
                make_fx(f, tracing_mode="symbolic")(x)
            with ctx():
                torch.compile(f, backend="eager", fullgraph=True)(x)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
_
def _(x):
    x.data_ptr()
    return x.clone()

x = torch.randn(3)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
expect_miss
def expect_miss(compiled_fn, a, b):
    self._clear_dynamo_and_codecache()
    counters.clear()
    res = compiled_fn(a, b)
    self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
    self.assertEqual(
        counters["aot_autograd"]["autograd_cache_guard_miss"],
        0,
    )
    self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
    return res
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
expect_hit
def expect_hit(compiled_fn, a, b):
    self._clear_dynamo_and_codecache()
    counters.clear()
    res = compiled_fn(a, b)
    self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 0)
    self.assertEqual(
        counters["aot_autograd"]["autograd_cache_guard_miss"],
        0,
    )
    self.assertEqual(
        counters["aot_autograd"]["autograd_cache_hit"],
        1,
    )
    return res
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
expect_guard_miss
def expect_guard_miss(compiled_fn, a, b):
    self._clear_dynamo_and_codecache()
    counters.clear()
    res = compiled_fn(a, b)
    self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
    self.assertEqual(
        counters["aot_autograd"]["autograd_cache_guard_miss"],
        1,
    )
    self.assertEqual(
        counters["aot_autograd"]["autograd_cache_hit"],
        0,
    )
    return res

compiled_fn = torch.compile(fn, dynamic=True)

a_shape = (5, 6)
b_shape = (7, 8)
a = torch.rand(a_shape, device=device, dtype=dtype)
b = torch.rand(b_shape, device=device, dtype=dtype)
res1 = expect_miss(compiled_fn, a, b)

# Same shape, should cache hit
a2 = a.detach().clone()
b2 = b.detach().clone()
res2 = expect_hit(compiled_fn, a2, b2)
self.assertEqual(res1, res2)

# By changing the shape greatly, despite the same exact input
# graph, inductor should report a guard miss, leading
# to a cache miss on our end.
a_shape = (5, 6)
b_shape = (47000, 47001)
a3 = torch.rand(a_shape, device=device, dtype=dtype)
b3 = torch.rand(b_shape, device=device, dtype=dtype)
expect_guard_miss(compiled_fn, a3, b3)

# Wobble the shape a bit, but not enough
# to trigger a guard miss (since 6, 7 is still less than int32)
# Should result in a cache hit
a_shape = (6, 7)
b_shape = (47000, 47001)
a4 = torch.rand(a_shape, device=device, dtype=dtype)
b4 = torch.rand(b_shape, device=device, dtype=dtype)
expect_hit(compiled_fn, a4, b4)

# Change the shape back to the original,
# FXGraphCache should hit because it stores
# multiple entries
a_shape = (5, 6)
b_shape = (7, 8)
a5 = torch.rand(a_shape, device=device, dtype=dtype)
b5 = torch.rand(b_shape, device=device, dtype=dtype)
expect_hit(compiled_fn, a5, b5)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_split_with_sizes_aot_autograd_cleans_up_traceback_meta
def test_split_with_sizes_aot_autograd_cleans_up_traceback_meta(self):
    from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks

    def fn(result, split_sizes):
        rs = torch.ops.aten.split_with_sizes(result, split_sizes.tolist())
        return rs

    example_inputs = (
        torch.randn(32, requires_grad=True),
        torch.tensor((7, 16, 9)),
    )
    outs = fn(*example_inputs)
    setup_stacktrace_preservation_hooks([out.grad_fn for out in outs])
    with fx_traceback.preserve_node_meta():
        (outs[0].sum() + outs[1].sum() + outs[2].sum()).backward()

    self.assertNotIn("grad_fn_seq_nr", fx_traceback.current_meta)
    self.assertNotIn("in_grad_fn", fx_traceback.current_meta)

# https://github.com/pytorch/pytorch/issues/110121
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_aot_export_joint_simple_repro
def test_aot_export_joint_simple_repro(self):
    class Mod(torch.nn.Module):
        def __init__(self, *args, **kwargs) -> None:
            super().__init__(*args, **kwargs)
            self.linear = torch.nn.Linear(5, 7)

        def forward(self, x):
            return self.linear(x)

    def mini_backend(gm, sample_inputs):
        from torch._functorch.aot_autograd import aot_export_joint_simple

        fake_mode = torch._dynamo.utils.detect_fake_mode(sample_inputs)

        with patch.object(fake_mode, "allow_non_fake_inputs", True), fake_mode:
            return aot_export_joint_simple(gm, sample_inputs, trace_joint=False)

    sample_inputs = [torch.rand((3, 4, 5))]
    model = Mod()
    m_compiled = torch.compile(model, backend=mini_backend)

    out_ref = model(*sample_inputs)
    out_test = m_compiled(*sample_inputs)
    self.assertEqual(out_ref, out_test)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
mini_backend
def mini_backend(gm, sample_inputs):
    from torch._functorch.aot_autograd import aot_export_joint_simple

    fake_mode = torch._dynamo.utils.detect_fake_mode(sample_inputs)

    with patch.object(fake_mode, "allow_non_fake_inputs", True), fake_mode:
        return aot_export_joint_simple(gm, sample_inputs, trace_joint=False)

sample_inputs = [torch.rand((3, 4, 5))]
model = Mod()
m_compiled = torch.compile(model, backend=mini_backend)

out_ref = model(*sample_inputs)
out_test = m_compiled(*sample_inputs)
self.assertEqual(out_ref, out_test)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_eager_sequence_nr
def test_eager_sequence_nr(self):
    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(
                in_channels=16,
                out_channels=16,
                kernel_size=(1, 1),
                stride=1,
                padding="same",
                bias=True,
            )
            self.bn1 = torch.nn.BatchNorm2d(num_features=16)
            self.relu1 = torch.nn.ReLU()
            self.fc1 = torch.nn.Linear(in_features=1638400, out_features=1)
            self.loss_fn = torch.nn.L1Loss()

        def forward(self, x, target):
            y = x
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu1(x)
            x = x + y
            x = torch.flatten(x)
            x = self.fc1(x)
            output = self.loss_fn(x, target)
            return (output,)

    def grad_with_create_graph(mod, x, target):
        y = mod(x, target)
        # Set create_graph=True to ensure that the sequence_nr
        # for backward ops continues to count down.
        (gx,) = torch.autograd.grad(
            y[0], x, create_graph=True, grad_outputs=grad_output
        )
        return gx

    x = torch.rand(100, 16, 32, 32, requires_grad=True)
    target = torch.rand(1)
    mod = Model()
    args = [mod, x, target]
    grad_output = torch.tensor(1.0, requires_grad=True)
    compiled_f1 = torch.compile(backend="aot_eager")(grad_with_create_graph)
    model_instance = compiled_f1
    with profile(
        activities=[torch.profiler.ProfilerActivity.CPU],
        record_shapes=True,
    ) as kineto_prof:
        res = model_instance(*args)
    bwd_set = set()
    prof_str = "SeqNr|Thread|FwdThread|Name\n"
    for event in kineto_prof.events():
        if event.sequence_nr >= 0:
            prof_str = (
                prof_str + f"{event.sequence_nr}|{event.thread}"
                f"|{event.fwd_thread}|{event.name}|\n"
            )
            if re.search(r"Backward[01]", event.name):
                bwd_set.add(event.sequence_nr)
    self.assertTrue(len(bwd_set), 13)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
__init__
def __init__(self) -> None:
    super().__init__()
    self.fn = CustomFunc1.apply
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class Module2(torch.nn.Module): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_autograd_function_equivalence
def test_autograd_function_equivalence(self):
    for grad in [True, False]:
        for i in range(1, 5):
            torch._dynamo.reset()
            model = globals()[f"Module{i}"]()
            opt_model = torch._dynamo.optimize("eager")(model)
            self.assertTrue(
                torch.allclose(
                    opt_model(torch.ones(2, 3, requires_grad=grad)),
                    torch.tensor([2.0], requires_grad=grad),
                )
            )
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_autograd_function_has_graph_break
def test_autograd_function_has_graph_break(self):
    for grad in [True, False]:
        x = torch.randn(10, requires_grad=grad)
        for model in [Module5(), Module6()]:
            torch._dynamo.reset()
            cnts = torch._dynamo.testing.CompileCounter()
            opt_model = torch._dynamo.optimize(cnts)(model)
            for _ in range(3):
                ref = model(x)
                res = opt_model(x)
                self.assertTrue(torch.allclose(ref, res))
            self.assertEqual(cnts.frame_count, 2)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_nn_module_with_params_global_constant
def test_nn_module_with_params_global_constant(self):
    class MyMod(torch.nn.Module):
        CONSTANT = torch.tensor([[2, 2], [2, 2]])

        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.randn([2, 2]))

        def forward(self, x):
            return x.sin() + self.param + MyMod.CONSTANT

    with torch.no_grad():
        compiled_fn = torch.compile(MyMod(), backend="inductor", fullgraph=True)
        res1 = compiled_fn(torch.ones([2, 2]))
        self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

        self._clear_dynamo_and_codecache()
        res2 = compiled_fn(torch.ones([2, 2]))
        self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

        self.assertEqual(res1, res2)

        # Edit the "constant". We'll get a cache hit,
        # but it should result in a different result when run
        # because MyMod.CONSTANT is an input to the graph
        MyMod.CONSTANT = torch.tensor([[3, 3], [3, 3]])
        self._clear_dynamo_and_codecache()
        res3 = compiled_fn(torch.ones([2, 2]))
        self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 2)
        self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
        self.assertNotEqual(res1, res3)
        self.assertEqual(res1, res3.sub(torch.ones(2, 2)))
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @instantiate_parametrized_tests class AOTAutogradCacheTests(InductorTestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
forward
def forward(self, x):
    return x.sin() + self.param + MyMod.CONSTANT
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU class MyMod(torch.nn.Module): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
default_config
def default_config(self):
    return AOTConfig(
        fw_compiler=None,
        bw_compiler=None,
        inference_compiler=None,
        partition_fn=None,
        decompositions={},
        num_params_buffers=0,
        aot_id=0,
        keep_inference_input_mutations=False,
        dynamic_shapes=True,
        aot_autograd_arg_pos_to_source=None,
        is_export=False,
        no_tangents=False,
        enable_log=False,
    )
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
_get_dynamo_output
def _get_dynamo_output(self, fn, *args, **kwargs):
    # Reset dynamo between runs
    torch._dynamo.reset()
    fx_graph = None
    example_inputs = None

    def compiler(gm, inputs, **kwargs):
        nonlocal fx_graph
        nonlocal example_inputs
        fx_graph = gm
        example_inputs = inputs
        return gm

    g = torch.compile(fn, backend=compiler, fullgraph=True)
    result = g(*args, **kwargs)
    return (result, fx_graph, example_inputs)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
compiler
def compiler(gm, inputs, **kwargs):
    nonlocal fx_graph
    nonlocal example_inputs
    fx_graph = gm
    example_inputs = inputs
    return gm

g = torch.compile(fn, backend=compiler, fullgraph=True)
result = g(*args, **kwargs)
return (result, fx_graph, example_inputs)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
gen_cache_key
def gen_cache_key(self, f, config, inputs=None):
    if inputs is None:
        inputs = [torch.ones(3)]
    _, fx_g, example_inputs = self._get_dynamo_output(f, *inputs)
    return autograd_cache_key(fx_g, example_inputs, config, {})
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn2
def fn2(x):
    y = x.sin()
    z = y.cos()
    return z

# Make the id different, but otherwise identical
config = self.default_config()
config2 = self.default_config()
config2.aot_id = 1

c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config2)
self.assertEqual(c1, c2)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn2
def fn2(x):
    y = x.sin()
    z = y.cos()
    return z

# Make the id different, but otherwise identical
config = self.default_config()
config2 = self.default_config()
config2.aot_id = 1

c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config2)
self.assertEqual(c1, c2)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_incompatible_function
def test_incompatible_function(self):
    @torch._dynamo.allow_in_graph
    class AllowInGraphFunc(torch.autograd.Function):
        @staticmethod
        def forward(_, x):
            torch._dynamo.graph_break()
            return x.sin()

    def fn(x):
        return AllowInGraphFunc.apply(x)

    config = self.default_config()
    self.assertRaises(
        BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
    )
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
forward
def forward(self, x):
    return x.sin() + self.param + MyMod.CONSTANT
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU class MyMod(torch.nn.Module): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_private_namespace
def test_private_namespace(self):
    # TODO: anyone who monkeypatches a **public** function into torch namespace with @allow_in_graph
    # could still break our sanity check and cache something bad. But that's an edge case we'll take the risk on.
    # Monkeypatch some random private function into torch, see that it fails
    @torch._dynamo.allow_in_graph
    def my_private_fun(x):
        return x.sin()

    with patch("torch._my_priv", new=my_private_fun, create=True):

        def fn(x):
            return torch._my_priv(x)

        config = self.default_config()
        self.assertRaises(
            BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
        )
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
my_private_fun
def my_private_fun(x):
    return x.sin()

with patch("torch._my_priv", new=my_private_fun, create=True):

    def fn(x):
        return torch._my_priv(x)

    config = self.default_config()
    self.assertRaises(
        BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
    )
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_private_builtin
def test_private_builtin(self):
    # _foreach_add is a private torch function, but
    # it's also a builtin_function_or_method, so it should be allowed to be cached
    # since dynamo allows it in the graph
    def fn(x, b):
        y = (x, x)
        return torch._foreach_add(y, b)

    config = self.default_config()
    r1 = self.gen_cache_key(fn, config, inputs=[torch.ones(3), 1])
    r2 = self.gen_cache_key(fn, config, inputs=[torch.ones(3), 2])
    self.assertNotEqual(r1, r2)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_nn_module_with_params
def test_nn_module_with_params(self):
    class MyMod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.seq = torch.nn.Parameter(torch.ones((3, 3)))

        def forward(self, x):
            return self.seq + x

    config = self.default_config()
    # Different inputs and parameters, but all the same size
    c1 = self.gen_cache_key(MyMod(), config, inputs=[torch.ones((3, 3))])
    c2 = self.gen_cache_key(MyMod(), config, inputs=[torch.ones((3, 3))])
    self.assertEqual(c1, c2)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
forward
def forward(self, x):
    return x.sin() + self.param + MyMod.CONSTANT
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU class MyMod(torch.nn.Module): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
test_normal_torch_function
def test_normal_torch_function(self):
    @torch._dynamo.allow_in_graph
    def fn(x):
        y = torch.sin(x)
        z = torch.cos(x)
        w = y + z
        w.abs()
        return w

    config = self.default_config()
    self.gen_cache_key(fn, config)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @inductor_config.patch("fx_graph_cache", True) class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
test_donated_buffer_with_retain_or_create_graph4
def test_donated_buffer_with_retain_or_create_graph4(self):
    # Gives non-empty bw_donated_idxs
    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.zeros([3, 3]))

        def forward(self, x):
            return torch.nn.functional.relu(x) + self.param

    inp = torch.randn(3, 3, requires_grad=True)

    mod = torch.compile(Mod())
    mod(inp).sum().backward()

    out = mod(inp).sum()
    with self.assertRaisesRegex(
        RuntimeError,
        r"This backward function was compiled with non-empty donated "
        r"buffers which requires create_graph=False and retain_graph=False. "
        r"Please keep backward\(create_graph=False, retain_graph=False\) "
        r"across all backward\(\) function calls, or set "
        r"torch._functorch.config.donated_buffer=False to disable "
        r"donated buffer.",
    ):
        out.backward(retain_graph=True)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
setUp
def setUp(self):
    """
    Reset all counters and caches before each unit test
    """
    super().setUp()
    counters.clear()
    self._clear_all_caches()
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @instantiate_parametrized_tests class AOTAutogradCacheTests(InductorTestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd_cache.py
_clear_all_caches
def _clear_all_caches(self):
    """
    Clear every cache, including AOTAutogradCache and FXCache
    """
    torch._inductor.codecache.FxGraphCache.clear()
    AOTAutogradCache.clear()
    self._clear_dynamo_and_codecache()
import os import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch._functorch._aot_autograd from torch._dynamo import config as dynamo_config from torch._dynamo.utils import counters from torch._functorch import config as functorch_config from torch._functorch._aot_autograd.autograd_cache import ( AOTAutogradCache, autograd_cache_key, BypassAOTAutogradCache, ) from torch._functorch._aot_autograd.schemas import AOTConfig from torch._inductor import config as inductor_config from torch._inductor.test_case import TestCase as InductorTestCase from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, skipIfWindows, ) from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU @instantiate_parametrized_tests class AOTAutogradCacheTests(InductorTestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
fn
def fn(x):
    return AllowInGraphFunc.apply(x)

x = torch.rand(2, 3, requires_grad=True)
result = fn(x)
self.assertEqual(result, AllowInGraphFunc.apply(x))
self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_once_differentiable
def test_once_differentiable(self):
    from torch.autograd.function import once_differentiable

    torch._dynamo.utils.counters.clear()
    cnt = torch._dynamo.testing.CompileCounter()

    class ScaleGradient(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        @once_differentiable
        def backward(ctx, grad):
            return grad * 0.5

    @torch.compile(backend=cnt, fullgraph=True)
    def fn(x):
        return ScaleGradient.apply(x)

    x = torch.randn(3, requires_grad=True)
    result = fn(x)
    self.assertEqual(result, ScaleGradient.apply(x))
    self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
fn
def fn(x):
    return AllowInGraphFunc.apply(x)

x = torch.rand(2, 3, requires_grad=True)
result = fn(x)
self.assertEqual(result, AllowInGraphFunc.apply(x))
self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_classmethod
def test_classmethod(self):
    class Shake(torch.autograd.Function):
        @classmethod
        def forward(cls, ctx, foo):
            return foo + foo

        @classmethod
        def backward(cls, ctx, grad_output):
            return grad_output

    def f(x):
        return Shake.apply(x)

    x = torch.randn(4, 4, 4, 4, requires_grad=True)
    opt_m = torch.compile(backend="eager")(f)
    opt_m(x)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_linear_setup_context
def test_linear_setup_context(self):
    model = ModuleLinear()
    opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
    input = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    weight = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
    eager_result = model(input, weight)
    optim_result = opt_model(input, weight)
    self.assertEqual(optim_result, eager_result)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_materialize_grad
def test_materialize_grad(self):
    model = MaterializingGradModule()
    opt_model = torch._dynamo.optimize("eager")(model)
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    optim_result = opt_model(x)
    eager_result = model(x)
    self.assertEqual(optim_result, eager_result)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_print_in_bwd
def test_print_in_bwd(self):
    model = CustomFuncBwdPrintModule()
    opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, "builtin: print"):
        opt_model(x)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_stride_in_bwd
def test_stride_in_bwd(self):
    torch._dynamo.utils.counters.clear()
    cnt = torch._dynamo.testing.CompileCounter()
    model = CustomFuncStrideModule()
    opt_model = torch.compile(backend=cnt)(model)
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    ref = model(x)
    res = opt_model(x)

    self.assertEqual(ref, res)
    self.assertEqual(cnt.frame_count, 1)
    # graph break: Illegal getattr invocation stride in strict mod.
    self.assertEqual(
        list(torch._dynamo.utils.counters["graph_break"].values()), [1]
    )
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_enum_arg
def test_enum_arg(self):
    from enum import Enum

    class SomeEnum(Enum):
        A = 0
        B = 1

    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, e):
            if e is SomeEnum.A:
                return x.sin()
            else:
                return x.cos()

        @staticmethod
        def backward(ctx, g):
            return g

    @torch.compile(backend="eager", fullgraph=True)
    def f(x, enum):
        output = Foo.apply(
            x,
            enum,
        )
        return output

    x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
    y = f(x, SomeEnum.A)
    self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum):
    output = Foo.apply(
        x,
        enum,
    )
    return output

x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
y = f(x, SomeEnum.A)
self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_save_for_bwd
def test_save_for_bwd(self):
    model = SaveForBwdModule()
    opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    opt_model(x)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_allow_in_graph
def test_allow_in_graph(self):
    torch._dynamo.utils.counters.clear()
    cnt = torch._dynamo.testing.CompileCounter()

    @torch._dynamo.allow_in_graph
    class AllowInGraphFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            torch._dynamo.graph_break()
            ctx.x0 = x.size(0)
            return x * 2

        @staticmethod
        def backward(ctx, grad_out):
            return grad_out * ctx.x0

    @torch.compile(backend=cnt, fullgraph=True)
    def fn(x):
        return AllowInGraphFunc.apply(x)

    x = torch.rand(2, 3, requires_grad=True)
    result = fn(x)
    self.assertEqual(result, AllowInGraphFunc.apply(x))
    self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
relu
def relu(x):
    return torch.nn.functional.relu(x)

with self.assertLogs(logger_name, level="INFO") as captured:
    relu(torch.rand([3, 3], requires_grad=True)).sum().backward()

if is_dynamic_shape_test(self._testMethodName):
    # an extra symint exists
    expected_msg = "bw_donated_idxs=[1]"
else:
    expected_msg = "bw_donated_idxs=[0]"

# le is a donated buffer from relu
FileCheck().check(expected_msg).run("\n".join(captured.output))
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
g
def g(activation, param2):
    return torch.matmul(activation, param2)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
g
def g(activation, param2):
    return torch.matmul(activation, param2)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum):
    output = Foo.apply(
        x,
        enum,
    )
    return output

x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
y = f(x, SomeEnum.A)
self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_function_context_save_and_mark
def test_function_context_save_and_mark(self):
    mod = ModuleWithGradFunc(ContextSaveAndMark)
    args, kwargs = ([torch.rand([1])], {})
    before = mod(*args, **kwargs)

    torch._dynamo.reset()
    compiled_model = torch._dynamo.optimize("eager")(mod)
    after = compiled_model(*args, **kwargs)
    self.assertEqual(before, after)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_function_context_mark_and_save
def test_function_context_mark_and_save(self):
    mod = ModuleWithGradFunc(ContextMarkAndSave)
    args, kwargs = ([torch.rand([1])], {})
    before = mod(*args, **kwargs)

    torch._dynamo.reset()
    compiled_model = torch._dynamo.optimize("eager")(mod)
    after = compiled_model(*args, **kwargs)
    self.assertEqual(before, after)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_multi_output
def test_multi_output(self):
    torch._dynamo.utils.counters.clear()
    cnt = torch._dynamo.testing.CompileCounter()

    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone(), x.clone()

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 + grad2

    @torch.compile(backend=cnt, fullgraph=True)
    def f(x):
        return Foo.apply(x)

    x = torch.randn(3, requires_grad=True)
    result = f(x)
    self.assertEqual(result, Foo.apply(x))
    self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum):
    output = Foo.apply(
        x,
        enum,
    )
    return output

x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
y = f(x, SomeEnum.A)
self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_amp_custom_fwd_bwd
def test_amp_custom_fwd_bwd(self):
    torch._dynamo.utils.counters.clear()
    cnt = torch._dynamo.testing.CompileCounter()

    class MyMM(torch.autograd.Function):
        @staticmethod
        @torch.amp.custom_fwd(device_type="cuda")
        def forward(ctx, a, b):
            ctx.save_for_backward(a, b)
            return a.mm(b)

        @staticmethod
        @torch.amp.custom_bwd(device_type="cuda")
        def backward(ctx, grad):
            a, b = ctx.saved_tensors
            return grad.mm(b.t()), a.t().mm(grad)

    @torch.compile(backend=cnt, fullgraph=True)
    def fn(a, b):
        return MyMM.apply(a, b)

    a = torch.randn([64, 64], dtype=torch.float32, requires_grad=True)
    grad = a.clone()
    res = fn(a, a)
    res.backward(grad)
    self.assertEqual(res, MyMM.apply(a, a))
    self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
data_ptr_graph_input
def data_ptr_graph_input(x):
    r0 = torch.ops.mylib.foo(x)
    return r0
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
data_ptr_graph_intermediate
def data_ptr_graph_intermediate(x):
    y = x.clone()
    r0 = torch.ops.mylib.foo(y)
    return r0

tests = [data_ptr_graph_input, data_ptr_graph_intermediate]
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
ctx
def ctx():
    return self.assertRaisesRegex(
        RuntimeError, "Cannot access data pointer"
    )

for f in tests:
    with ctx():
        make_fx(f, tracing_mode="fake")(x)
    with ctx():
        make_fx(f, tracing_mode="symbolic")(x)
    with ctx():
        torch.compile(f, backend="eager", fullgraph=True)(x)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
backward
def backward(ctx, grad):
    (x,) = ctx.saved_tensors
    return x, grad
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class Test(torch.autograd.Function): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
_
def _(x):
    x.data_ptr()
    return x.clone()

x = torch.randn(3)
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
fn
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)

with self.assertRaisesRegex(
    RuntimeError,
    "a leaf Variable that requires grad is being used in an in-place operation.",
):
    aot_fn(x, y)
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)
# This should not error: we mutated an autograd leaf under no_grad mode.
aot_fn(x, y)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_autograd_function.py
fn
def fn(x):
    return AllowInGraphFunc.apply(x)

x = torch.rand(2, 3, requires_grad=True)
result = fn(x)
self.assertEqual(result, AllowInGraphFunc.apply(x))
self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_set_materialize_grads_no_graph_break
def test_set_materialize_grads_no_graph_break(self):
    class MulY(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.set_materialize_grads(True)
            return x * 3

        @staticmethod
        def backward(ctx, grad_out):
            return grad_out * 3

    @torch.compile(backend="eager", fullgraph=True)
    def f(x):
        return MulY.apply(x)

    x = torch.tensor(2.0, requires_grad=True)
    result = f(x)
    result.sum().backward()
    self.assertEqual(result, MulY.apply(x))
    self.assertEqual(x.grad, 3.0)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum):
    output = Foo.apply(
        x,
        enum,
    )
    return output

x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
y = f(x, SomeEnum.A)
self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added