column            type            values / length range
library           stringclasses   1 value
test_file         stringclasses   785 values
test_function     stringlengths   1 to 295
before            stringlengths   0 to 448k
after             stringlengths   0 to 487k
context_before    stringclasses   947 values
context_after     stringlengths   0 to 16.3k
commit_before     stringclasses   1 value
commit_after      stringclasses   1 value
change_type       stringclasses   3 values
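The schema above describes one record per changed test function. As a minimal sketch of how such records could be loaded and queried (assuming they are available locally as a Parquet file named records.parquet; the file name and storage format are illustrative assumptions, not stated by this preview):

import pandas as pd

# Load the flattened records; the path is a placeholder assumption.
df = pd.read_parquet("records.parquet")

# Columns listed in the schema above.
expected = [
    "library", "test_file", "test_function",
    "before", "after", "context_before", "context_after",
    "commit_before", "commit_after", "change_type",
]
assert set(expected).issubset(df.columns)

# Example query: tests added to the Dynamo autograd.Function test file.
added = df[
    (df["change_type"] == "added")
    & (df["test_file"] == "test/dynamo/test_autograd_function.py")
]
print(added[["test_function", "commit_after"]].drop_duplicates().head())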
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum):
    output = Foo.apply(
        x,
        enum,
    )
    return output

x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True)
y = f(x, SomeEnum.A)
self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_tensor_list_as_input
def test_tensor_list_as_input(self):
    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, tl):
            ctx.save_for_backward(tl[0], tl[1])
            return x.clone() * (tl[0] + tl[1])

        @staticmethod
        def backward(ctx, grad):
            tl0, tl1 = ctx.saved_tensors
            return grad * (tl0 + tl1), None

    @torch.compile(backend="aot_eager", fullgraph=True)
    def f(x, tl):
        return Foo.apply(x, tl)

    x = torch.tensor(2.0, requires_grad=True)
    tl = [
        torch.tensor(3.0, requires_grad=True),
        torch.tensor(4.0, requires_grad=True),
    ]
    result = f(x, tl)
    result.sum().backward()
    self.assertEqual(result, Foo.apply(x, tl))
    self.assertEqual(x.grad, 7.0)
    self.assertEqual(tl[0].grad, None)
    self.assertEqual(tl[1].grad, None)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output):
    return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_multiple_different_non_tensor_inputs
def test_multiple_different_non_tensor_inputs(self):
    @dataclass
    class Weird:
        x: int
        b: torch.Tensor
        c: torch.Tensor

    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, weird, z, tl):
            ctx.save_for_backward(weird.b, weird.c, tl[0], tl[1])
            return x.clone() * weird.b * weird.c * tl[0]

        @staticmethod
        def backward(ctx, grad):
            b, c, tl0, _ = ctx.saved_tensors
            return grad * b * c * tl0, None, grad * 2, None

    @torch.compile(backend="aot_eager", fullgraph=True)
    def f(x, weird, z, tl):
        return Foo.apply(x, weird, z, tl)

    x = torch.tensor(2.0, requires_grad=True)
    weird = Weird(
        1.2,
        torch.tensor(2.5, requires_grad=True),
        torch.tensor(3.5, requires_grad=True),
    )
    z = torch.tensor(3.0, requires_grad=True)
    tl = [
        torch.tensor(0.5, requires_grad=True),
        torch.tensor(0.6, requires_grad=True),
    ]
    result = f(x, weird, z, tl)
    result.sum().backward()
    self.assertEqual(result, Foo.apply(x, weird, z, tl))
    self.assertEqual(x.grad, 2.5 * 3.5 * 0.5)
    self.assertEqual(z.grad, 2.0)
    self.assertEqual(weird.b.grad, None)
    self.assertEqual(weird.c.grad, None)
    self.assertEqual(tl[0].grad, None)
    self.assertEqual(tl[1].grad, None)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
foo
def foo(x, scale):
    config = (
        x.size(),
        x.stride(),
        x.storage_offset(),
        x.dtype,
        x.layout,
        x.requires_grad,
    )
    x = FooTensor(x, config, scale)
    x = foo_autograd_fn.apply(x)
    return x

y_ref = foo(x_ref, scale)
y_ref.sum().backward()

foo_opt = torch.compile(foo, backend="eager")
y = foo_opt(x, scale)
y.sum().backward()
self.assertEqual(y, y_ref)
self.assertEqual(x.grad, x_ref.grad)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_smuggle_tensor_and_complex_structures
def test_smuggle_tensor_and_complex_structures(self):
    from torch.autograd import Function

    class Foo(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.x0 = x
            ctx.x1 = [1, 2, 3]
            return x * 2

        @staticmethod
        def backward(ctx, grad_out):
            x0mul = grad_out * ctx.x0
            for i in ctx.x1:
                x0mul = (x0mul * i) + x0mul
            return x0mul

    cnts = torch._dynamo.testing.CompileCounter()

    @torch.compile(backend=cnts, fullgraph=True, dynamic=True)
    def foo(x):
        return Foo.apply(x)

    foo(torch.randn(2, requires_grad=True))
    self.assertEqual(cnts.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
fn
def fn(x):
    return AllowInGraphFunc.apply(x)

x = torch.rand(2, 3, requires_grad=True)
result = fn(x)
self.assertEqual(result, AllowInGraphFunc.apply(x))
self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
run_fn
def run_fn(a):
    out = MyFunc2.apply(a)
    return out.sum()

class MyFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp):
        return inp.view_as(inp)

    @staticmethod
    def backward(ctx, grad):
        return grad

class MyAdder(torch.autograd.Function):
    @staticmethod
    def forward(ctx, a, b):
        a.add_(b)
        ctx.mark_dirty(a)
        return a

    @staticmethod
    def backward(ctx, grad):
        return grad, grad

class InplaceMul(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        result = x.mul_(2)
        ctx.mark_dirty(result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        pass

    @staticmethod
    def jvp(ctx, x_t):
        if jvp_err:  # noqa: F821
            return x_t
        else:
            return x_t.mul_(2)

class MyFn2(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, y):
        return x + y, x

    @staticmethod
    def vjp(ctx, gO1, gO2):
        return gO1 + gO2, gO1

    @staticmethod
    def jvp(ctx, x_t, y_t):
        return x_t + y_t, fn(x_t)  # noqa: F821

class MyFn3(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp, inplace):
        view = inp.clone()[:3]
        if inplace:
            view += 2
        return view

    @staticmethod
    def backward(ctx, grad):
        return grad, None
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test
def test():
    x = torch.ones(2, 4, 4).requires_grad_()
    mult2(x)

    x = torch.tensor(2).double().requires_grad_()
    double(x)
    double2(x)

    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Identity.apply(x, y)

    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = MyFn.apply(a)

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)
    c = MyAdder.apply(a.clone(), b)
    c.sum().backward()

    z = torch.tensor(1.0, requires_grad=True)
    x = z.clone()
    y = InplaceMul.apply(x)

    a = torch.tensor(1.0, dtype=torch.double, requires_grad=True)
    b = torch.tensor(1.0, dtype=torch.double, requires_grad=True)
    c = torch.tensor(1.0, dtype=torch.double)
    d = torch.tensor(1.0, dtype=torch.double)
    MyFn2.apply(a, b)
    MyFn2.apply(c, d)

    base = torch.rand(10, requires_grad=True)
    foo = MyFn3.apply(base, False)

test()
opt_test = torch._dynamo.optimize("eager")(test)
opt_test()
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_tensor_subclass_intermediary_input
def test_tensor_subclass_intermediary_input(self):
    class FooTensor(torch.Tensor):
        @staticmethod
        def __new__(cls, data, config, scale):
            self = torch.Tensor._make_wrapper_subclass(
                cls,
                config[0],
                strides=config[1],
                storage_offset=config[2],
                dtype=config[3],
                layout=config[4],
                requires_grad=config[5],
                device=data.device,
            )
            self._data = data
            self._config = config
            self._scale = scale
            return self

        def __repr__(self):
            return "FooTensor"

        def __tensor_flatten__(self):
            return ("_data",), (
                self._config,
                self._scale,
            )

        @staticmethod
        def __tensor_unflatten__(tensors, metadatas, outer_size, outer_stride):
            return FooTensor(tensors["_data"], metadatas[0], metadatas[1])

        @classmethod
        def __torch_dispatch__(cls, func, types, args, kwargs=None):
            # handling clone and view is so dynamo fakefication passes, it's not
            # intended to be handling user code
            if func == torch.ops.aten.clone.default:
                return FooTensor(
                    args[0]._data.clone(), args[0]._config, args[0]._scale
                )
            elif func == torch.ops.aten.view.default:
                new_data = args[0]._data.view(*args[1:])
                return FooTensor(new_data, args[0]._config, args[0]._scale)

            raise NotImplementedError

    class foo_autograd_fn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            # access some data from `x`, where `x` is a tensor subclass
            x2 = x._data + 1.0
            # create and return a tensor subclass from within a torch.autograd.Function
            x3 = FooTensor(x2, x._config, x._scale)
            return x3._data

        @staticmethod
        def backward(ctx, g):
            return g

    x_ref = torch.randn(4, 4).requires_grad_(True)
    x = copy.deepcopy(x_ref)
    scale = torch.tensor(1.0)

    # Weird that this is needed, but not having this breaks a lot of things
    torch._dynamo.allow_in_graph(FooTensor)

    def foo(x, scale):
        config = (
            x.size(),
            x.stride(),
            x.storage_offset(),
            x.dtype,
            x.layout,
            x.requires_grad,
        )
        x = FooTensor(x, config, scale)
        x = foo_autograd_fn.apply(x)
        return x

    y_ref = foo(x_ref, scale)
    y_ref.sum().backward()

    foo_opt = torch.compile(foo, backend="eager")
    y = foo_opt(x, scale)
    y.sum().backward()
    self.assertEqual(y, y_ref)
    self.assertEqual(x.grad, x_ref.grad)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
__new__
def __new__(cls, data, config, scale):
    self = torch.Tensor._make_wrapper_subclass(
        cls,
        config[0],
        strides=config[1],
        storage_offset=config[2],
        dtype=config[3],
        layout=config[4],
        requires_grad=config[5],
        device=data.device,
    )
    self._data = data
    self._config = config
    self._scale = scale
    return self
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable class FooTensor(torch.Tensor): from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
__repr__
def __repr__(self):
    return "FooTensor"
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable class FooTensor(torch.Tensor): from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
__torch_dispatch__
def __torch_dispatch__(cls, func, types, args, kwargs=None):
    # handling clone and view is so dynamo fakefication passes, it's not
    # intended to be handling user code
    if func == torch.ops.aten.clone.default:
        return FooTensor(
            args[0]._data.clone(), args[0]._config, args[0]._scale
        )
    elif func == torch.ops.aten.view.default:
        new_data = args[0]._data.view(*args[1:])
        return FooTensor(new_data, args[0]._config, args[0]._scale)

    raise NotImplementedError
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable class FooTensor(torch.Tensor): from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_backward_returns_none_for_tensor_input
def test_backward_returns_none_for_tensor_input(self):
    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.save_for_backward(y)
            return x.clone() * y

        @staticmethod
        def backward(ctx, grad):
            (y,) = ctx.saved_tensors
            return grad * y, None

    @torch.compile(backend="aot_eager", fullgraph=True)
    def f(x, y):
        return Foo.apply(x, y)

    x = torch.tensor(2.0, requires_grad=True)
    y = torch.tensor(3.0, requires_grad=True)
    result = f(x, y)
    result.sum().backward()
    self.assertEqual(result, Foo.apply(x, y))
    self.assertEqual(x.grad, 3.0)
    self.assertEqual(y.grad, None)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_function_with_bound_free_variable
def test_function_with_bound_free_variable(self):
    class LowerBound(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inputs, bound):
            ctx.save_for_backward(inputs, inputs.new_ones(1) * bound)
            return inputs.clamp(min=bound)

        @staticmethod
        def backward(ctx, grad_output):
            inputs, bound = ctx.saved_tensors
            return (inputs >= bound) * grad_output, None

    class MyMod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.gamma = torch.nn.Parameter(torch.rand([4, 128, 32, 32]))

        def forward(self, x):
            gamma = LowerBound.apply(self.gamma, 1)
            return x + gamma

    mod = MyMod()
    args, kwargs = ([torch.rand([4, 128, 32, 32])], {})
    before = mod(*args, **kwargs)

    compiled_model = torch._dynamo.optimize("eager")(mod)
    after = compiled_model(*args, **kwargs)
    self.assertEqual(before, after)

# I pulled all of these test cases from test_autograd.py
# In the future, we should make the Dynamo test suite actually
# run on test_autograd.py (it's disabled right now) and delete these.
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
mult1
def mult1(x):
    return x.prod(dim=-1).prod(dim=-1)

class Mult(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        y = mult1(x)
        ctx.save_for_backward(x, y)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        return (grad_output * y)[:, None, None] / x

mult2 = Mult.apply

class Double(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        y = x**2
        ctx.save_for_backward(x, y)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        x, _ = ctx.saved_tensors
        return grad_output * 2 * x

# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        return grad_output * 2 * y / x

double = Double.apply
double2 = Double2.apply

class Identity(torch.autograd.Function):
    @staticmethod
    def forward(ctx, a, b):
        return a, a + b

    @staticmethod
    def backward(ctx, grad_a, grad_b):
        return grad_a + grad_b, grad_b

class MyFunc2(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inp):
        return inp.clone()

    @staticmethod
    def backward(ctx, gO):
        return torch.tensor(float("nan")).expand(10, 10)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
jvp
def jvp(ctx, x_t):
    if jvp_err:  # noqa: F821
        return x_t
    else:
        return x_t.mul_(2)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable class InplaceMul(torch.autograd.Function): from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_smuggle_symint_issue_111031
def test_smuggle_symint_issue_111031(self):
    from torch.autograd import Function

    class Foo(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.x0 = x.size(0)
            return x * 2

        @staticmethod
        def backward(ctx, grad_out):
            return grad_out * ctx.x0

    cnts = torch._dynamo.testing.CompileCounter()

    @torch.compile(backend=cnts, fullgraph=True, dynamic=True)
    def foo(x):
        return Foo.apply(x)

    foo(torch.randn(2, requires_grad=True))
    self.assertEqual(cnts.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_needs_input_grad
def test_needs_input_grad(self):
    cnt = torch._dynamo.testing.CompileCounter()

    class NeedsInputGradFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, foo):
            result = foo + foo
            ctx.save_for_backward(result)
            return result

        @staticmethod
        @torch.compile(backend=cnt, fullgraph=True)
        def backward(ctx, grad_output):
            (result,) = ctx.saved_tensors
            if ctx.needs_input_grad[0]:
                return grad_output * result.sin()
            return None

    x = torch.randn(10, requires_grad=True)
    NeedsInputGradFunc.apply(x).sum().backward()
    self.assertEqual(x.grad.shape, x.shape)
    self.assertEqual(cnt.frame_count, 1)
    self.assertEqual(cnt.op_count, 2)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_repeated_save_for_backward_calls
def test_repeated_save_for_backward_calls(self):
    from torch.autograd import Function

    class Foo(Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.save_for_backward(x)
            ctx.save_for_backward(x, y)
            return x * y

        @staticmethod
        def backward(ctx, grad_out):
            x, y = ctx.saved_tensors
            return grad_out * x, grad_out * y

    cnts = torch._dynamo.testing.CompileCounter()

    def foo(x, y):
        return Foo.apply(x, y)

    x_ref = torch.randn(2, requires_grad=True)
    y_ref = torch.randn(2, requires_grad=True)
    x_test = x_ref.clone().detach().requires_grad_()
    y_test = y_ref.clone().detach().requires_grad_()

    out_ref = foo(x_ref, y_ref)
    out_ref.sum().backward()

    out_test = torch.compile(foo, backend=cnts)(x_test, y_test)
    out_test.sum().backward()

    self.assertEqual(cnts.frame_count, 1)
    self.assertEqual(out_ref, out_test)
    self.assertEqual(x_ref.grad, x_test.grad)
    self.assertEqual(y_ref.grad, y_test.grad)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_mark_multi_output_non_differentiable
def test_mark_multi_output_non_differentiable(self):
    from torch.autograd import Function

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x, y, z):
            out1 = x.sin()
            out2 = y * 2
            out3 = z + 3
            ctx.mark_non_differentiable(out2, out3)
            return out1, out2, out3

        @staticmethod
        def backward(ctx, grad1, grad2, grad3):
            return grad1.cos(), grad2, grad3

    @torch.compile(backend="aot_eager", fullgraph=True)
    def fn(x, y, z):
        return MyFunction.apply(x, y, z)

    x = torch.tensor(10.0, requires_grad=True)
    y = torch.tensor(20.0, requires_grad=True)
    z = torch.tensor(30.0, requires_grad=True)
    ref1, ref2, ref3 = MyFunction.apply(x, y, z)
    res1, res2, res3 = fn(x, y, z)
    self.assertEqual(ref1, res1)
    self.assertEqual(ref2, res2)
    self.assertEqual(ref3, res3)
    # Ensure out1 requires gradients, out2 does not.
    self.assertTrue(ref1.requires_grad)
    self.assertTrue(res1.requires_grad)
    self.assertFalse(ref2.requires_grad)
    self.assertFalse(res2.requires_grad)
    self.assertFalse(ref3.requires_grad)
    self.assertFalse(res3.requires_grad)
    res1.sum().backward()
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_default_values
def test_default_values(self):
    from torch.autograd import Function

    class Foo(Function):
        @staticmethod
        def forward(ctx, x, alpha=0.99):
            return x

        @staticmethod
        def backward(ctx, grad_out):
            return grad_out

    @torch.compile
    def foo(x):
        return Foo.apply(x)

    # Make sure guards for default values do not crash
    foo(torch.randn(2))
    foo(torch.randn(2, requires_grad=True))
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output): return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
foo
def foo(x, scale): config = ( x.size(), x.stride(), x.storage_offset(), x.dtype, x.layout, x.requires_grad, ) x = FooTensor(x, config, scale) x = foo_autograd_fn.apply(x) return x y_ref = foo(x_ref, scale) y_ref.sum().backward() foo_opt = torch.compile(foo, backend="eager") y = foo_opt(x, scale) y.sum().backward() self.assertEqual(y, y_ref) self.assertEqual(x.grad, x_ref.grad)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output): return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
fn
def fn(x): return AllowInGraphFunc.apply(x) x = torch.rand(2, 3, requires_grad=True) result = fn(x) self.assertEqual(result, AllowInGraphFunc.apply(x)) self.assertEqual(cnt.frame_count, 1)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
backward
def backward(ctx, grad_output): return grad_output
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class CustomFunc1(torch.autograd.Function): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
f
def f(x, enum): output = Foo.apply( x, enum, ) return output x = torch.tensor([[1.0, 2, 3], [4, 5, 6]], requires_grad=True) y = f(x, SomeEnum.A) self.assertEqual(y, x.sin())
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_autograd_function.py
test_triton_kernel_multiple_out
def test_triton_kernel_multiple_out(self): class Add(torch.autograd.Function): @staticmethod def forward(ctx, x, y): ctx.save_for_backward(x, y) ctx.t1 = x ctx.t2 = y output = torch.zeros_like(x) n_elements = output.numel() grid = lambda meta: ( # noqa: E731 triton.cdiv(n_elements, meta["BLOCK_SIZE"]), ) add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=16) return output, x @staticmethod def backward(ctx, grad_output, old_x): x, y = ctx.saved_tensors x1 = ctx.t1 y1 = ctx.t2 return old_x * x * x1 * grad_output, y * y1 * grad_output @torch.compile(fullgraph=True, backend="inductor") def f(x, y): z = Add.apply(x, y) return z x = torch.randn(10, device="cuda", requires_grad=True) y = torch.randn(10, device="cuda", requires_grad=True) z, _ = f(x, y) loss = z.sum() loss.backward() self.assertEqual(x + y, z)
import copy import math from dataclasses import dataclass import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda import triton from torch.testing._internal.triton_utils import add_kernel class AutogradFunctionTests(torch._dynamo.test_case.TestCase): from enum import Enum from torch.autograd.function import once_differentiable from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch.autograd import Function from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
_side_effect_stateful_fn2
def _side_effect_stateful_fn2(x, obj): obj.counter = obj.counter + 1 return _multiply(x)
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
_side_effectful_invoke2
def _side_effectful_invoke2(grad, fn): return trace_wrapped(grad, fn=fn) graph = None
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
inner_compiler
def inner_compiler(gm_, example_inputs_): nonlocal graph self.assertEqual(graph, None) graph = gm_ return inductor.compile(gm_, example_inputs_) return torch.compile( gm, backend=inner_compiler, fullgraph=True, dynamic=True )
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
fn
def fn(x, y): x.register_hook(_multiply_invoke) return x * y out = fn(x, y) grad_out = torch.tensor([2.0, 2.0]) out.backward(grad_out) self.assertEqual(x.grad, y * grad_out)
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
_graph_breaking_fn
def _graph_breaking_fn(x): print("Boo!") return _multiply(x)
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
_graph_break_invoke
def _graph_break_invoke(grad): return trace_wrapped(grad, fn=_graph_breaking_fn)
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_backward_higher_order_ops.py
fn
def fn(x, y): x.register_hook(_multiply_invoke) return x * y out = fn(x, y) grad_out = torch.tensor([2.0, 2.0]) out.backward(grad_out) self.assertEqual(x.grad, y * grad_out)
import functools import torch import torch._dynamo.test_case import torch._dynamo.testing import torch._dynamo.utils from torch import _inductor as inductor from torch._dynamo import compiled_autograd from torch._dynamo._trace_wrapped_higher_order_op import trace_wrapped from torch._dynamo.testing import normalize_gm from torch._dynamo.utils import counters from torch.fx.experimental.proxy_tensor import make_fx from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
maybe_skip
def maybe_skip(fn): if unet_2d is None: return unittest.skip("requires diffusers")(fn) return fn class TestBaseOutput(torch._dynamo.test_case.TestCase): @maybe_skip def test_create(self): def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1) @maybe_skip def test_assign(self): def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) tmp.sample = a + 2 return tmp args = [torch.randn(10)] obj1 = fn(*args) cnts = torch._dynamo.testing.CompileCounter() opt_fn = torch._dynamo.optimize_assert(cnts)(fn) obj2 = opt_fn(*args) self.assertTrue(same(obj1.sample, obj2.sample)) self.assertEqual(cnts.frame_count, 1) self.assertEqual(cnts.op_count, 2) def _common(self, fn, op_count): args = [ unet_2d.UNet2DOutput( sample=torch.randn(10), ) ] obj1 = fn(*args) cnts = torch._dynamo.testing.CompileCounter() opt_fn = torch._dynamo.optimize_assert(cnts)(fn) obj2 = opt_fn(*args) self.assertTrue(same(obj1, obj2)) self.assertEqual(cnts.frame_count, 1) self.assertEqual(cnts.op_count, op_count) @maybe_skip def test_getattr(self): def fn(obj: unet_2d.UNet2DOutput): x = obj.sample * 10 return x self._common(fn, 1) @maybe_skip def test_getitem(self): def fn(obj: unet_2d.UNet2DOutput): x = obj["sample"] * 10 return x self._common(fn, 1) @maybe_skip def test_tuple(self): def fn(obj: unet_2d.UNet2DOutput): a = obj.to_tuple() return a[0] * 10 self._common(fn, 1) @maybe_skip def test_index(self): def fn(obj: unet_2d.UNet2DOutput): return obj[0] * 10 self._common(fn, 1) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_base_output.py
fn
def fn(a): tmp = unet_2d.UNet2DOutput(a + 1) return tmp torch._dynamo.testing.standard_test(self, fn=fn, nargs=1, expected_ops=1)
import unittest.mock import torch import torch._dynamo.test_case import torch._dynamo.testing from torch._dynamo.testing import same from diffusers.models import unet_2d from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
forward
def forward(self, x): return self.relu(self.linear(x))
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class ToyModel(torch.nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_compilation
def test_compilation(self): torch._dynamo.reset() model = ToyModel() cnt = CompileCounter() model.compile(backend=cnt) x = torch.randn(10, 10) model(x) self.assertEqual(cnt.frame_count, 1)
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_overwrite_call_impl
def test_overwrite_call_impl(self): torch._dynamo.reset() model = ToyModel() self.assertTrue(model._compiled_call_impl is None) model.compile() self.assertTrue(model._compiled_call_impl is not None)
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_save
def test_save(self): torch._dynamo.reset() model = ToyModel() model.compile() model(torch.randn(1, 10)) with tempfile.TemporaryDirectory() as tmpdirname: torch.save(model, os.path.join(tmpdirname, "model.pt")) loaded_model = torch.load(os.path.join(tmpdirname, "model.pt")) loaded_model(torch.randn(1, 10))
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_state_dict_save
def test_state_dict_save(self): torch._dynamo.reset() model = ToyModel() model.compile() model(torch.randn(1, 10)) with tempfile.TemporaryDirectory() as tmpdirname: torch.save(model.state_dict(), os.path.join(tmpdirname, "model.pt")) loaded_model = ToyModel() loaded_model.load_state_dict( torch.load(os.path.join(tmpdirname, "model.pt")) ) loaded_model(torch.randn(1, 10))
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_jit_save
def test_jit_save(self): torch._dynamo.reset() model = ToyModel() model.compile() model(torch.randn(1, 10)) scripted_model = torch.jit.script(model) with tempfile.TemporaryDirectory() as tmpdirname: torch.jit.save(scripted_model, os.path.join(tmpdirname, "model.pt")) loaded_model = torch.jit.load(os.path.join(tmpdirname, "model.pt")) loaded_model(torch.randn(1, 10))
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_compilation_callback
def test_compilation_callback(self): torch._dynamo.reset() @torch._dynamo.on_compile_start def start_callback(): print("Compilation started.") @torch._dynamo.on_compile_end def end_callback(): print("Compilation ended.") mod = ToyModel() x = torch.randn(10, 10) with patch("sys.stdout", new_callable=io.StringIO) as mock_stdout: opt_mod = torch.compile(backend="eager", fullgraph=True)(mod) opt_mod(x) printed_output = mock_stdout.getvalue().strip() self.assertEqual(printed_output, "Compilation started.\nCompilation ended.")
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
end_callback
def end_callback(): print("Compilation ended.") mod = ToyModel() x = torch.randn(10, 10) with patch("sys.stdout", new_callable=io.StringIO) as mock_stdout: opt_mod = torch.compile(backend="eager", fullgraph=True)(mod) opt_mod(x) printed_output = mock_stdout.getvalue().strip() self.assertEqual(printed_output, "Compilation started.\nCompilation ended.")
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_compile.py
test_compile_eager_options
def test_compile_eager_options(self): @torch.compile(backend="eager", options={"foo": 2}) def f(x): return x + x f(torch.randn(3)) @torch.compile(backend="aot_eager", options={"foo": 2}) def g(x): return x + x g(torch.randn(3))
import inspect import io import os import tempfile from unittest.mock import patch import torch from torch._dynamo.test_case import run_tests, TestCase from torch._dynamo.testing import CompileCounter class InPlaceCompilationTests(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
nothing
def nothing(*args): pass code = bytecode_transformation.transform_code_object(fn.__code__, nothing) self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
nothing
def nothing(*args): pass code = bytecode_transformation.transform_code_object(fn.__code__, nothing) self.assertEqual(code.co_exceptiontable, fn.__code__.co_exceptiontable)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
test_remove_dead_code_with_exn_table_entries
def test_remove_dead_code_with_exn_table_entries(self): create_instruction = bytecode_transformation.create_instruction target1 = create_instruction("NOP") target2 = create_instruction("NOP") target3 = create_instruction("NOP") exn_start = create_instruction("NOP") exn_end = create_instruction("NOP") insts = [ create_instruction("JUMP_FORWARD", target=target1), exn_start, # dead target1, create_instruction("JUMP_FORWARD", target=target3), exn_end, # dead target2, target3, ] exn_start.exn_tab_entry = bytecode_transformation.InstructionExnTabEntry( exn_start, exn_end, target2, 0, True ) bytecode_transformation.propagate_inst_exn_table_entries(insts) insts = bytecode_analysis.remove_dead_code(insts) self.assertEqual(len(insts), 5) self.assertNotIn(exn_start, insts) self.assertNotIn(exn_end, insts) self.assertIn(target2, insts) self.assertIn(target3, insts) bytecode_transformation.update_offsets(insts) tab = bytecode_transformation.compute_exception_table(insts) self.assertEqual(len(tab), 1) self.assertEqual(tab[0].start, 2) self.assertEqual(tab[0].end, 4) self.assertEqual(tab[0].target, 6)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 class BytecodeTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
test_bytecode_from_template_noprefix
def test_bytecode_from_template_noprefix(self): # Test that 3.11+ prefix instructions are removed def gen_fn(): cl = None def fn(): return cl return fn fn = gen_fn() dis_insts = list(dis.get_instructions(fn)) names = {inst.opname for inst in dis_insts} self.assertIn("RESUME", names) self.assertIn("COPY_FREE_VARS", names) insts = bytecode_transformation.bytecode_from_template(fn) names = {inst.opname for inst in insts} self.assertNotIn("RESUME", names) self.assertNotIn("COPY_FREE_VARS", names)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 class BytecodeTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
gen_fn
def gen_fn(): cl = None def fn(): return cl return fn fn = gen_fn() dis_insts = list(dis.get_instructions(fn)) names = {inst.opname for inst in dis_insts} self.assertIn("RESUME", names) self.assertIn("COPY_FREE_VARS", names) insts = bytecode_transformation.bytecode_from_template(fn) names = {inst.opname for inst in insts} self.assertNotIn("RESUME", names) self.assertNotIn("COPY_FREE_VARS", names)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
test_bytecode_from_template_noreturn_const
def test_bytecode_from_template_noreturn_const(self): # Test 3.12+ RETURN_CONST def fn(): if x: return 1 return 0 dis_insts = list(dis.get_instructions(fn)) dis_return_consts = list( filter(lambda x: x.opname == "RETURN_CONST", dis_insts) ) self.assertGreater(len(dis_return_consts), 1) self.assertTrue(dis_insts[-1].opname == "RETURN_CONST") insts = bytecode_transformation.bytecode_from_template(fn, noprefix=False) self.assertEqual(insts[-1].opname, "NOP") insts_i = 0 for i, inst in enumerate(dis_insts): if inst.opname == "RETURN_CONST": self.assertEqual(insts[insts_i].opname, "LOAD_CONST") insts_i += 1 if insts_i != len(insts) - 1: self.assertIn("JUMP", insts[insts_i].opname) self.assertIs(insts[insts_i].target, insts[-1]) insts_i += 1
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 class BytecodeTests(torch._dynamo.test_case.TestCase): from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
hook
def hook(code, out_code): print(code) print(out_code) return code torch._dynamo.reset() handle = torch._dynamo.convert_frame.register_bytecode_hook(hook) try: opt_fn = torch.compile(fn) for i in range(2, 12): opt_fn(torch.randn(i), torch.randn(i)) finally: handle.remove()
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
fn
def fn(): a = 10 b = 20 # prevent LOAD_FAST_LOAD_FAST in 3.13 by wrapping b with g() c = a + g(b) f = "linetable_writer" return f"Test if {f} generates correct co_linetable: {c}" keys = bytecode_transformation.get_code_keys() code_options = {k: getattr(fn.__code__, k) for k in keys} result = bytecode_transformation.clean_and_assemble_instructions( bytecode_transformation.cleaned_instructions(fn.__code__), keys, code_options, ) l1, l2 = list(fn.__code__.co_positions()), list(result[1].co_positions()) self.assertEqual(len(l1), len(l2)) for p1, p2 in zip(l1, l2): self.assertEqual(p1, p2) # TODO co_lnotab is deprecated in 3.12 and will be removed in 3.14 # In 3.11+, it is computed lazily from other linetable attributes (e.g. co_linetable), # so we do not set this attribute ourselves. self.assertEqual(fn.__code__.co_lnotab, result[1].co_lnotab)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_bytecode_utils.py
f
def f(x, y): z = 1 if x is None: z *= 2 if y is not None: z *= 3 return z opt_f = torch._dynamo.optimize("eager", nopython=True)(f) self.assertEqual(opt_f(None, torch.ones(2)), 6) if sys.version_info >= (3, 11): insts = bytecode_transformation.cleaned_instructions(f.__code__) for inst in insts: self.assertNotIn("_NONE", inst.opname)
import collections import dis import sys import unittest import torch import torch._dynamo.test_case from torch._dynamo import bytecode_analysis, bytecode_transformation from torch._dynamo.testing import skipIfNotPy311, skipIfNotPy312 from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added