| column | dtype | value statistics |
|---|---|---|
| library | string | 1 distinct value |
| test_file | string | 785 distinct values |
| test_function | string | lengths 1–295 |
| before | string | lengths 0–448k |
| after | string | lengths 0–487k |
| context_before | string | 947 distinct values |
| context_after | string | lengths 0–16.3k |
| commit_before | string | 1 distinct value |
| commit_after | string | 1 distinct value |
| change_type | string | 3 distinct values |
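Rows with this schema can be consumed directly with the Hugging Face `datasets` library. The sketch below is a minimal example under assumptions: `org/test-function-changes` is a placeholder repo id (the dataset's actual path is not shown here), and the column names are taken from the table above.

```python
# Minimal sketch, assuming a hypothetical repo id and the columns listed above.
from datasets import load_dataset

ds = load_dataset("org/test-function-changes", split="train")  # placeholder path

# Each row pairs a test function's source before and after a commit,
# plus the surrounding file context and the two commit hashes.
row = ds[0]
print(row["library"], row["test_file"], row["test_function"], row["change_type"])

# e.g. keep only test functions newly added between the two commits
added = ds.filter(lambda r: r["change_type"] == "added")
print(len(added))
```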
torch
test/distributions/test_distributions.py
test_lazy_probs_initialization
def test_lazy_probs_initialization(self):
    for Dist, params in self.examples:
        param = params[0].copy()
        if 'probs' not in param:
            continue
        dist = Dist(**param)
        dist.sample()
        message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))
        self.assertNotIn('logits', dist.__dict__, msg=message)
        try:
            dist.enumerate_support()
        except NotImplementedError:
            pass
        self.assertNotIn('logits', dist.__dict__, msg=message)
        batch_shape, event_shape = dist.batch_shape, dist.event_shape
        self.assertNotIn('logits', dist.__dict__, msg=message)
def test_lazy_probs_initialization(self):
    for Dist, params in self.examples:
        param = params[0].copy()
        if "probs" not in param:
            continue
        dist = Dist(**param)
        dist.sample()
        message = f"Failed for {Dist.__name__} example 0/{len(params)}"
        self.assertNotIn("logits", dist.__dict__, msg=message)
        try:
            dist.enumerate_support()
        except NotImplementedError:
            pass
        self.assertNotIn("logits", dist.__dict__, msg=message)
        batch_shape, event_shape = dist.batch_shape, dist.event_shape
        self.assertNotIn("logits", dist.__dict__, msg=message)
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] class TestLazyLogitsInitialization(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) class TestLazyLogitsInitialization(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
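For `modified` rows such as the one above, `before` and `after` hold the full source of the same test function at the two commits, so a standard text diff recovers the change. Below is a minimal sketch using Python's `difflib`; `row` stands for any such record (a dict keyed by the columns in the table) and is an assumption for illustration, not part of any dataset API.

```python
import difflib

def show_change(row):
    # `row` is assumed to be one dataset record with the columns listed above.
    diff = difflib.unified_diff(
        row["before"].splitlines(keepends=True),
        row["after"].splitlines(keepends=True),
        fromfile=f"{row['test_file']}@{row['commit_before']}",
        tofile=f"{row['test_file']}@{row['commit_after']}",
    )
    print("".join(diff))
```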
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
_get_custom_policy
def _get_custom_policy(no_recompute_list=None, must_recompute_list=None): def _custom_policy(ctx, func, *args, **kwargs): if no_recompute_list is not None and func in no_recompute_list: return CheckpointPolicy.MUST_SAVE if must_recompute_list is not None and func in must_recompute_list: return CheckpointPolicy.MUST_RECOMPUTE else: return CheckpointPolicy.PREFER_RECOMPUTE return _custom_policy class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase): def _validate(self, fn, backend, *args, skip_check=False, fullgraph=True): cloned_args = [] for arg in args: cloned_args.append(arg.clone().detach().requires_grad_(arg.requires_grad)) torch.manual_seed(0) expected = fn(*args) expected.sum().backward() torch.manual_seed(0) result = torch.compile(fn, fullgraph=fullgraph, backend=backend)(*cloned_args) result.sum().backward() if not skip_check: self.assertEqual( result, expected, msg="Output mismatch between torch.compile and eager versions", ) for arg, cloned_arg in zip(args, cloned_args): self.assertEqual( arg.grad, cloned_arg.grad, msg="Gradient mismatch between torch.compile and eager versions", ) def _compare_orig_and_checkpointed_fns( self, orig_fn, checkpointed_fn, *args, fullgraph=True ): # The original version and the checkpointed version of the same function # should produce the same outputs and the same gradients under torch.compile. # Run original version cloned_args_orig_fn = [] for arg in args: cloned_args_orig_fn.append( arg.clone().detach().requires_grad_(arg.requires_grad) ) torch.manual_seed(0) compiled_orig_fn = torch.compile( orig_fn, fullgraph=fullgraph, backend="inductor" ) result_orig_fn = compiled_orig_fn(*cloned_args_orig_fn) result_orig_fn.sum().backward() # Run checkpointed version cloned_args_checkpointed_fn = [] for arg in args: cloned_args_checkpointed_fn.append( arg.clone().detach().requires_grad_(arg.requires_grad) ) torch.manual_seed(0) compiled_checkpointed_fn = torch.compile( checkpointed_fn, fullgraph=fullgraph, backend="inductor" ) result_checkpointed_fn = compiled_checkpointed_fn(*cloned_args_checkpointed_fn) result_checkpointed_fn.sum().backward() # Check that outputs and gradients are equal self.assertEqual( result_orig_fn, result_checkpointed_fn, msg="Output mismatch between the original version and the checkpointed version of the same function", ) for cloned_arg_orig_fn, cloned_arg_checkpointed_fn in zip( cloned_args_orig_fn, cloned_args_checkpointed_fn ): self.assertEqual( cloned_arg_orig_fn.grad, cloned_arg_checkpointed_fn.grad, msg="Gradient mismatch between the original version and the checkpointed version of the same function", ) @requires_cuda def test_tags_function(self): def gn(x, y): return torch.sigmoid(torch.matmul(x, y)) def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, torch.sin(x), y, use_reentrant=True ) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=3, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y) @requires_cuda def test_tags_function_via_global_checkpoint(self): def gn(x, y): return torch.sigmoid(torch.matmul(x, y)) def fn(x, y): # This goes through VariableBuilder return checkpoint(gn, torch.sin(x), y, use_reentrant=True) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, 
device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=3, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y) @requires_cuda def test_tags_function_with_kwargs(self): def gn(x, y): return torch.sigmoid(torch.matmul(x, y)) def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, torch.sin(x), y, use_reentrant=True, preserve_rng_state=False ) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=3, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y) @requires_cuda def test_tags_sequential_layers(self): def gn(x): x = x.cos() for _ in range(3): x = torch.mm(x, x) x = x.cos() return x def fn(x): x = torch.utils.checkpoint.checkpoint(gn, x) x = torch.utils.checkpoint.checkpoint(gn, x) return x x = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=6, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freqs=[2, 18], ops=[torch.ops.aten.cos.default, torch.ops.aten.mm.default], ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x) @requires_cuda def test_tags_multiple_checkpoints(self): def gn(x, y): return torch.sigmoid(torch.matmul(x, y)) def fn(x, y): x = torch.sin(x) z = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) x = torch.sin(z) z = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) return z x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=2, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=6, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y) @requires_cuda def test_tags_module(self): class MockModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 10) def forward(self, x): return torch.sigmoid(self.linear(x)) mod = MockModule().cuda() def fn(x): return torch.utils.checkpoint.checkpoint( mod, torch.sin(x), use_reentrant=True ) x = torch.randn(10, 10, device="cuda", requires_grad=True) fw_compiler = functools.partial( count_ops, freq=1, op=torch.ops.aten.sigmoid.default ) bw_compiler = functools.partial( count_ops, freq=1, op=torch.ops.aten.sigmoid.default ) backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x) @requires_cuda def test_tags_decomps(self): # Ensures that tags are passed on through decompositions as well class MockModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 10) def forward(self, x): return torch.nn.functional.gelu(self.linear(x)) mod = MockModule().cuda() def fn(x): return torch.utils.checkpoint.checkpoint( mod, torch.sin(x), use_reentrant=True ) x = torch.randn(10, 10, device="cuda", requires_grad=True) fw_compiler = functools.partial( count_ops, freq=1, 
op=torch.ops.aten.erf.default ) bw_compiler = functools.partial( count_ops, freq=1, op=torch.ops.aten.erf.default ) backend = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, decompositions=lambda: import_module( "torch._inductor.compile_fx" ).select_decomp_table(), ) self._validate(fn, backend, x) @requires_cuda @torch._inductor.config.patch(fallback_random=True) def test_tags_recomputed_rand(self): def gn(x, y): return torch.sigmoid(torch.rand_like(x) * y) * x def fn(x, y): x = torch.sin(x) x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) x = torch.sin(x) z = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) return z x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) # fw_compiler = functools.partial(count_ops, freq=2, op=torch.ops.aten.mm.default) # bw_compiler = functools.partial( # count_ops, freq=6, op=torch.ops.aten.mm.default # ) # mm recomputed in the bwd # backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) backend = "inductor" self._validate(fn, backend, x, y) @requires_cuda @torch._inductor.config.patch(fallback_random=True) def test_tags_rand(self): def gn(x, y): x = torch.mm(x, y) x = torch.mm(x, y) return x def fn(x, y): x = torch.sin(x) x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) x = torch.sin(x) # x = torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) return x x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) # fw_compiler = functools.partial(count_ops, freq=2, op=torch.ops.aten.mm.default) # bw_compiler = functools.partial( # count_ops, freq=6, op=torch.ops.aten.mm.default # ) # mm recomputed in the bwd # backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) # backend = "aot_eager" backend = "inductor" self._validate(fn, backend, x, y) @requires_cuda @torch._inductor.config.patch(fallback_random=True) def test_tags_dropout(self): # Figure out a way to test the number of inductor_random calls class MockModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(10, 10) self.dropout = torch.nn.Dropout(0.2) def forward(self, x): return self.dropout(self.linear(x)) mod = MockModule().cuda() def fn(x): return torch.utils.checkpoint.checkpoint(mod, x, use_reentrant=True) x = torch.randn(10, 10, device="cuda", requires_grad=True) backend = "inductor" # rand decomps do not have have numerical results as eager self._validate(fn, backend, x, skip_check=True) @requires_cuda def test_fallback(self): def gn(x, y): torch._dynamo.graph_break() a = torch.sigmoid(torch.matmul(x, y)) torch._dynamo.graph_break() return torch.cos(a) def fn(x, y): return torch.cos(checkpoint(gn, torch.sin(x), y, use_reentrant=False)) x = torch.randn(4, 4, requires_grad=True) y = torch.randn(4, 4, requires_grad=True) args = (x, y) backend = "aot_eager" cnt = CompileCounterWithBackend(backend) expected = fn(*args) result = torch.compile(fn, backend=cnt)(*args) self.assertEqual(result, expected) # One graph for torch.sin on the input, and other for torch.cos. 
self.assertEqual(cnt.frame_count, 2) self.assertEqual(cnt.op_count, 2) self.assertEqual(len(cnt.graphs), 2) @requires_cuda def test_kwargs(self): def gn(x, y, z=None): a = torch.matmul(x, y) if z is not None: return torch.matmul(a, z) return a def fn(x, y, z): return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z)) x = torch.randn(4, 4, requires_grad=True) y = torch.randn(4, 4, requires_grad=True) z = torch.randn(4, 4, requires_grad=True) args = (x, y, z) backend = "aot_eager" cnt = CompileCounterWithBackend(backend) expected = fn(*args) result = torch.compile(fn, backend=cnt)(*args) self.assertEqual(result, expected) self.assertEqual(cnt.frame_count, 1) self.assertEqual(len(cnt.graphs), 1) wrap_node = find_first_node(cnt.graphs[0], tag_activation_checkpoint) # one for checkpoint, and 3 for x, y, z self.assertEqual(len(wrap_node.args), 4) body_function = getattr(cnt.graphs[0], wrap_node.args[0].name) self.assertEqual(op_count(body_function), 2) @requires_cuda def test_symints_location(self): def gn(x, y): return torch.matmul(x, torch.nn.functional.dropout(y, 0.5)) def fn(x, y): return torch.utils.checkpoint.checkpoint(gn, x, y, use_reentrant=True) backend = "aot_eager" cnt = CompileCounterWithBackend(backend) opt_fn = torch.compile(fn, backend=cnt) x = torch.randn(4, 4, requires_grad=True) y = torch.randn(4, 4, requires_grad=True) args = (x, y) expected = fn(*args) result = opt_fn(*args) x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) args = (x, y) expected = fn(*args) result = opt_fn(*args) self.assertEqual(result.shape, expected.shape) self.assertEqual(cnt.frame_count, 2) self.assertEqual(len(cnt.graphs), 2) wrap_node = find_first_node(cnt.graphs[0], tag_activation_checkpoint) self.assertEqual(len(wrap_node.args), 3) @requires_cuda @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows") def test_compile_selective_checkpoint_must_recompute(self): def context_fn_must_recompute_mm(): must_recompute_list = [ torch.ops.aten.mm.default, ] return create_selective_checkpoint_contexts( _get_custom_policy( must_recompute_list=must_recompute_list, ), ) def context_fn_no_recompute_mm(): no_recompute_list = [ torch.ops.aten.mm.default, ] return create_selective_checkpoint_contexts( _get_custom_policy( no_recompute_list=no_recompute_list, ), ) def _test(context_fn, bw_compiler): def gn(x): return torch.sigmoid(torch.matmul(x, x)) def fn(x): return torch.utils.checkpoint.checkpoint( gn, x, use_reentrant=False, context_fn=context_fn, ) x = torch.randn(4, 4, requires_grad=True) fw_compiler = functools.partial( count_ops, freq=1, op=torch.ops.aten.mm.default, ) backend = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) self._validate(fn, backend, x) _test( context_fn=context_fn_must_recompute_mm, bw_compiler=functools.partial( count_ops, freq=3, # 1 matmul recompute and 2 bwd mm ops per fwd matmul, so 1 + 2 * 1 = 3) op=torch.ops.aten.mm.default, ), ) _test( context_fn=context_fn_no_recompute_mm, bw_compiler=functools.partial( count_ops, freq=2, # 2 bwd mm ops per fwd matmul op=torch.ops.aten.mm.default, ), ) @requires_cuda @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows") def test_compile_selective_checkpoint_must_not_recompute_gemm(self): def selective_checkpointing_context_fn(): no_recompute_list = [ torch.ops.aten.mm.default, ] return create_selective_checkpoint_contexts( _get_custom_policy(no_recompute_list=no_recompute_list) ) def gn(x, y): return 
torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, x, y, use_reentrant=False, context_fn=selective_checkpointing_context_fn, ) x = torch.randn(4, 4, requires_grad=True, device="cuda") y = torch.randn(4, 4, requires_grad=True, device="cuda") fw_compiler = functools.partial( count_ops, freq=2, op=torch.ops.aten.mm.default, ) bw_compiler = functools.partial( count_ops, # We would've expected 6 here # (2 matmul recompute and 2 mm ops per fwd matmul, so 2 + 2 * 2 = 6) # if we didn't enable selective checkpointing. freq=4, op=torch.ops.aten.mm.default, ) backend = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) self._validate(fn, backend, x, y) self._compare_orig_and_checkpointed_fns(gn, fn, x, y) @requires_cuda @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows") def test_compile_selective_checkpoint_tensor_subclass(self): def selective_checkpointing_context_fn(): no_recompute_list = [ torch.ops.aten.mm.default, ] return create_selective_checkpoint_contexts( _get_custom_policy(no_recompute_list=no_recompute_list) ) def gn(x, y): return torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, x, y, use_reentrant=False, context_fn=selective_checkpointing_context_fn, ) rand_tensor = torch.randn(4, 4, requires_grad=True, device="cuda") # tensor subclasses as inputs x = TwoTensor(rand_tensor, rand_tensor.clone()) y = TwoTensor(rand_tensor.clone(), rand_tensor.clone()) fw_compiler = functools.partial( count_ops, freq=4, op=torch.ops.aten.mm.default, ) bw_compiler = functools.partial( count_ops, # We would've expected 12 here # (4 matmul recompute and 4 mm ops per fwd matmul, so 4 + 2 * 4 = 12) # if we didn't enable selective checkpointing. freq=8, op=torch.ops.aten.mm.default, ) backend = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) self._validate(fn, backend, x, y) self._compare_orig_and_checkpointed_fns(gn, fn, x, y) @requires_cuda @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows") def test_compile_selective_checkpoint_custom_rule(self): def _get_custom_policy(meta): no_recompute_list = [ torch.ops.aten.mm.default, ] def _custom_policy(mode, func, *args, **kwargs): mm_count_key = f"{mode}_mm_count" if mm_count_key not in meta: meta[mm_count_key] = 0 if func == torch.ops.aten.mm.default: meta[mm_count_key] += 1 # Saves output of all compute ops, except second mm # (i.e. we will hint the partitioner to recompute second mm in backward pass) return func in no_recompute_list and not ( func == torch.ops.aten.mm.default and meta[mm_count_key] == 2 ) return _custom_policy
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
_custom_policy
def _custom_policy(ctx, func, *args, **kwargs):
    if no_recompute_list is not None and func in no_recompute_list:
        return CheckpointPolicy.MUST_SAVE
    if must_recompute_list is not None and func in must_recompute_list:
        return CheckpointPolicy.MUST_RECOMPUTE
    else:
        return CheckpointPolicy.PREFER_RECOMPUTE

return _custom_policy
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
selective_checkpointing_context_fn
def selective_checkpointing_context_fn():
    no_recompute_list = [
        torch.ops.aten.mm.default,
    ]
    return create_selective_checkpoint_contexts(
        _get_custom_policy(no_recompute_list=no_recompute_list)
    )
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
selective_checkpointing_context_fn
def selective_checkpointing_context_fn():
    no_recompute_list = [
        torch.ops.aten.mm.default,
    ]
    return create_selective_checkpoint_contexts(
        _get_custom_policy(no_recompute_list=no_recompute_list)
    )
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
selective_checkpointing_context_fn
def selective_checkpointing_context_fn():
    no_recompute_list = [
        torch.ops.aten.mm.default,
    ]
    return create_selective_checkpoint_contexts(
        _get_custom_policy(no_recompute_list=no_recompute_list)
    )
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
selective_checkpointing_context_fn
def selective_checkpointing_context_fn():
    no_recompute_list = [
        torch.ops.aten.mm.default,
    ]
    return create_selective_checkpoint_contexts(
        _get_custom_policy(no_recompute_list=no_recompute_list)
    )
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
selective_checkpointing_context_fn
def selective_checkpointing_context_fn():
    no_recompute_list = [
        torch.ops.aten.mm.default,
    ]
    return create_selective_checkpoint_contexts(
        _get_custom_policy(no_recompute_list=no_recompute_list)
    )
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x):
    return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x):
    return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x):
    return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)

fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
_custom_policy
def _custom_policy(ctx, func, *args, **kwargs):
    if no_recompute_list is not None and func in no_recompute_list:
        return CheckpointPolicy.MUST_SAVE
    if must_recompute_list is not None and func in must_recompute_list:
        return CheckpointPolicy.MUST_RECOMPUTE
    else:
        return CheckpointPolicy.PREFER_RECOMPUTE

return _custom_policy
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
parametrization
def parametrization(self, x):
    return torch.sigmoid(torch.mul(x, x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class Parametrization(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x):
    return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
apply_parametrization
def apply_parametrization(model): modules = list(model.modules()) for mod in modules: params_dict = dict(mod.named_parameters(recurse=False)) for p_name, p in params_dict.items(): mod.register_parameter(p_name, nn.Parameter(p)) nn.utils.parametrize.register_parametrization( mod, p_name, Parametrization(), unsafe=True ) return model class MLPModule(nn.Module): def __init__(self) -> None: super().__init__() torch.manual_seed(5) self.net1 = nn.Linear(16, 16, bias=False) def forward(self, x): return self.net1(x) def reset_parameters(self): self.net1.reset_parameters() fw_compiler = functools.partial( count_ops, freqs=[1, 1], ops=[torch.ops.aten.mul.Tensor, torch.ops.aten.sigmoid.default], ) bw_compiler = functools.partial( count_ops, freqs=[ 2, # 1 from mul recompute, 1 from mul backward 1, ], ops=[torch.ops.aten.mul.Tensor, torch.ops.aten.sigmoid.default], ) backend = aot_autograd( fw_compiler=fw_compiler, bw_compiler=bw_compiler, partition_fn=min_cut_rematerialization_partition, ) model = MLPModule() model = apply_parametrization(model) model_compiled = torch.compile( copy.deepcopy(model), backend=backend, fullgraph=True ) input = torch.randn(8, 16, requires_grad=True) input_compiled = copy.deepcopy(input) out = model(input) out.sum().backward() out_compiled = model_compiled(input_compiled) out_compiled.sum().backward() self.assertEqual(out, out_compiled) self.assertEqual(input.grad, input_compiled.grad)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
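The apply_parametrization record above asserts per-op frequencies in the forward and backward graphs through a count_ops helper that is defined elsewhere in the test file and not captured in this record. Purely as a hedged guess at its shape, a minimal checker passed to aot_autograd could look like the sketch below; the real helper (including its freqs/ops list variant) may differ.

import functools

import torch
from functorch.compile import min_cut_rematerialization_partition
from torch._dynamo.backends.common import aot_autograd


def count_ops(gm: torch.fx.GraphModule, example_inputs, freq, op):
    # Count occurrences of `op` (e.g. torch.ops.aten.mm.default) in the traced
    # graph, assert the expected frequency, and return the graph unchanged.
    actual = sum(1 for node in gm.graph.nodes if node.target == op)
    assert actual == freq, f"expected {freq} x {op}, found {actual}"
    return gm


fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(count_ops, freq=3, op=torch.ops.aten.mm.default)
# Passing this backend to torch.compile(fn, backend=backend) runs the asserts
# on the partitioned forward and backward graphs at compile time.
backend = aot_autograd(
    fw_compiler=fw_compiler,
    bw_compiler=bw_compiler,
    partition_fn=min_cut_rematerialization_partition,
)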
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x): return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
reset_parameters
def reset_parameters(self): self.net1.reset_parameters()
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MLPModule(nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, torch.sin(x), y, use_reentrant=True ) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=3, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
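The fn record above checkpoints gn so that the matmul is recomputed during backward (1 mm expected in the forward graph, 3 in the backward graph). Independent of those compile-time assertions, the core correctness property is easy to check eagerly; the sketch below is a CPU-only, standalone variant, not the test itself.

import torch
import torch.utils.checkpoint


def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))


def fn(x, y):
    return torch.utils.checkpoint.checkpoint(gn, torch.sin(x), y, use_reentrant=True)


x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 4, requires_grad=True)
x_ref = x.detach().clone().requires_grad_()
y_ref = y.detach().clone().requires_grad_()

# Checkpointing must not change the math: gradients match the plain
# (non-checkpointed) computation; only the backward pass recomputes gn.
fn(x, y).sum().backward()
gn(torch.sin(x_ref), y_ref).sum().backward()
assert torch.allclose(x.grad, x_ref.grad)
assert torch.allclose(y.grad, y_ref.grad)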
torch
test/dynamo/test_activation_checkpointing.py
gn
def gn(x, y): return torch.sigmoid(torch.matmul(x, y))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
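The fn record above ends with self._validate(fn, backend, x, y); that helper belongs to the test class and is not part of the record. As a hedged, hypothetical stand-in, an equivalent standalone check would run the function eagerly and under torch.compile with the given backend, then compare outputs and input gradients, roughly as sketched below.

import torch


def validate(fn, backend, *args):
    # Hypothetical stand-in for the test's _validate helper: compare eager
    # execution against torch.compile with the op-counting backend.
    cloned = [a.detach().clone().requires_grad_(a.requires_grad) for a in args]

    expected = fn(*args)
    expected.sum().backward()

    compiled_fn = torch.compile(fn, backend=backend, fullgraph=True)
    result = compiled_fn(*cloned)
    result.sum().backward()

    assert torch.allclose(expected, result)
    for arg, clone in zip(args, cloned):
        if arg.requires_grad:
            assert torch.allclose(arg.grad, clone.grad)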
torch
test/distributions/test_distributions.py
test_enumerate_support
def test_enumerate_support(self): for Dist, keys, values, sample in self._examples(): # FIXME traced functions produce incorrect results xfail = [Binomial] if Dist in xfail: continue def f(*values): param = dict(zip(keys, values)) dist = Dist(**param) return dist.enumerate_support() try: traced_f = torch.jit.trace(f, values) except NotImplementedError: continue # check on different data values, sample = self._perturb(Dist, keys, values, sample) expected = f(*values) actual = traced_f(*values) self.assertEqual(expected, actual, msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_enumerate_support(self): for Dist, keys, values, sample in self._examples(): # FIXME traced functions produce incorrect results xfail = [Binomial] if Dist in xfail: continue def f(*values): param = dict(zip(keys, values)) dist = Dist(**param) return dist.enumerate_support() try: traced_f = torch.jit.trace(f, values) except NotImplementedError: continue # check on different data values, sample = self._perturb(Dist, keys, values, sample) expected = f(*values) actual = traced_f(*values) self.assertEqual( expected, actual, msg=f"{Dist.__name__}\nExpected:\n{expected}\nActual:\n{actual}", )
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] class TestJit(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) class TestJit(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
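The test_enumerate_support record above traces a function that constructs a distribution and enumerates its support, then re-runs the trace on perturbed parameters. A single-distribution, hedged illustration of that pattern (Bernoulli only, with validate_args=False to keep data-dependent validation out of the trace) is sketched below.

import torch
from torch.distributions import Bernoulli


def f(probs):
    return Bernoulli(probs, validate_args=False).enumerate_support()


probs = torch.tensor([0.3, 0.7])
traced_f = torch.jit.trace(f, (probs,))

# Re-check the trace on different parameter values, as the test does after
# perturbing the inputs; the enumerated support {0, 1} must match eager mode.
new_probs = torch.tensor([0.1, 0.9])
assert torch.equal(f(new_probs), traced_f(new_probs))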
torch
test/distributions/test_distributions.py
test_icdf
def test_icdf(self): for pytorch_dist, scipy_dist in self.distribution_pairs: samples = torch.rand((5,) + pytorch_dist.batch_shape) try: icdf = pytorch_dist.icdf(samples) except NotImplementedError: continue self.assertEqual(icdf, scipy_dist.ppf(samples), msg=pytorch_dist)
def test_icdf(self): for pytorch_dist, scipy_dist in self.distribution_pairs: samples = torch.rand((5,) + pytorch_dist.batch_shape, dtype=torch.double) try: icdf = pytorch_dist.icdf(samples) except NotImplementedError: continue self.assertEqual(icdf, scipy_dist.ppf(samples), msg=pytorch_dist)
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] @unittest.skipIf(not TEST_NUMPY, "NumPy not found") class TestAgainstScipy(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) @unittest.skipIf(not TEST_NUMPY, "NumPy not found") @skipIfTorchDynamo("FIXME: Tries to trace through SciPy and fails") class TestAgainstScipy(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributions/test_distributions.py
test_invalid
def test_invalid(self):
    for Dist, params in BAD_EXAMPLES:
        for i, param in enumerate(params):
            try:
                with self.assertRaises(ValueError):
                    Dist(validate_args=True, **param)
            except AssertionError as e:
                fail_string = "ValueError not raised for {} example {}/{}"
                raise AssertionError(
                    fail_string.format(Dist.__name__, i + 1, len(params))
                ) from e
def test_invalid(self):
    for Dist, params in _get_bad_examples():
        for i, param in enumerate(params):
            try:
                with self.assertRaises(ValueError):
                    Dist(validate_args=True, **param)
            except AssertionError as e:
                fail_string = "ValueError not raised for {} example {}/{}"
                raise AssertionError(
                    fail_string.format(Dist.__name__, i + 1, len(params))
                ) from e
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] class TestValidation(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) class TestValidation(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x): return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x): return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y):
    return torch.utils.checkpoint.checkpoint(
        gn, torch.sin(x), y, use_reentrant=True
    )

x = torch.randn(4, 4, device="cuda", requires_grad=True)
y = torch.randn(4, 4, device="cuda", requires_grad=True)
fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default)
bw_compiler = functools.partial(
    count_ops, freq=3, op=torch.ops.aten.mm.default
)  # mm recomputed in the bwd
backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler)
self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
debug_compile_fx_inner
def debug_compile_fx_inner(graph, example_inputs, *args, **kwargs):
    aot_graphs.append(graph)
    return compile_fx.compile_fx_inner(graph, example_inputs, *args, **kwargs)

backend = functools.partial(
    compile_fx.compile_fx, inner_compile=debug_compile_fx_inner
)
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
opt_fn(*args1).sum().backward()

if PLATFORM_SUPPORTS_CUDNN_ATTENTION and SM90OrLater:
    op = torch.ops.aten._scaled_dot_product_cudnn_attention.default
else:
    op = torch.ops.aten._scaled_dot_product_flash_attention.default

fwd_graph = aot_graphs[0]
self.assertTrue(
    count_ops(
        fwd_graph,
        [],
        freq=1,
        op=op,
    )
)

bwd_graph = aot_graphs[1]
# Check that sin is not recomputed in the backward graph - checks percolate tags
self.assertTrue(count_ops(bwd_graph, [], freq=0, op=torch.ops.aten.sin.default))
# Check that the sdpa op is recomputed in the backward graph
self.assertTrue(
    count_ops(
        bwd_graph,
        [],
        freq=1,
        op=op,
    )
)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributions/test_distributions.py
_examples
def _examples(self):
    for Dist, params in EXAMPLES:
        for param in params:
            keys = param.keys()
            values = tuple(param[key] for key in keys)
            if not all(isinstance(x, torch.Tensor) for x in values):
                continue
            sample = Dist(**param).sample()
            yield Dist, keys, values, sample
def _examples(self):
    for Dist, params in _get_examples():
        for param in params:
            keys = param.keys()
            values = tuple(param[key] for key in keys)
            if not all(isinstance(x, torch.Tensor) for x in values):
                continue
            sample = Dist(**param).sample()
            yield Dist, keys, values, sample
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] class TestJit(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) class TestJit(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributions/test_distributions.py
test_log_prob
def test_log_prob(self): for Dist, keys, values, sample in self._examples(): # FIXME traced functions produce incorrect results xfail = [LowRankMultivariateNormal, MultivariateNormal] if Dist in xfail: continue def f(sample, *values): param = dict(zip(keys, values)) dist = Dist(**param) return dist.log_prob(sample) traced_f = torch.jit.trace(f, (sample,) + values) # check on different data values, sample = self._perturb(Dist, keys, values, sample) expected = f(sample, *values) actual = traced_f(sample, *values) self.assertEqual(expected, actual, msg='{}\nExpected:\n{}\nActual:\n{}'.format(Dist.__name__, expected, actual))
def test_log_prob(self): for Dist, keys, values, sample in self._examples(): # FIXME traced functions produce incorrect results xfail = [LowRankMultivariateNormal, MultivariateNormal] if Dist in xfail: continue def f(sample, *values): param = dict(zip(keys, values)) dist = Dist(**param) return dist.log_prob(sample) traced_f = torch.jit.trace(f, (sample,) + values) # check on different data values, sample = self._perturb(Dist, keys, values, sample) expected = f(sample, *values) actual = traced_f(sample, *values) self.assertEqual( expected, actual, msg=f"{Dist.__name__}\nExpected:\n{expected}\nActual:\n{actual}", )
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch from torch import inf, nan from torch.testing._internal.common_utils import \ (TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests, gradcheck, skipIfTorchDynamo) from torch.testing._internal.common_cuda import TEST_CUDA from torch.autograd import grad import torch.autograd.forward_ad as fwAD from torch.autograd.functional import jacobian from torch.distributions import (Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, Kumaraswamy, LKJCholesky, Laplace, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, constraints, kl_divergence) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import (AffineTransform, CatTransform, ExpTransform, StackTransform, identity_transform) from torch.distributions.utils import (probs_to_logits, lazy_property, tril_matrix_to_vec, vec_to_tril_matrix) from torch.nn.functional import softmax load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.stats import scipy.special Example = namedtuple('Example', ['Dist', 'params']) EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]), Example(Geometric, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, ]), Example(Beta, [ { 'concentration1': torch.randn(2, 3).exp().requires_grad_(), 'concentration0': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration1': torch.randn(4).exp().requires_grad_(), 'concentration0': torch.randn(4).exp().requires_grad_(), }, ]), Example(Categorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 
0.9]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([[10., 8.], [5., 3.]])}, {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor(0.)}, ]), Example(Multinomial, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': 1.0}, {'loc': torch.tensor([0.0]), 'scale': 1.0}, {'loc': torch.tensor([[0.0], [0.0]]), 'scale': torch.tensor([[1.0], [1.0]])} ]), Example(Chi2, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(StudentT, [ {'df': torch.randn(2, 3).exp().requires_grad_()}, {'df': torch.randn(1).exp().requires_grad_()}, ]), Example(Dirichlet, [ {'concentration': torch.randn(2, 3).exp().requires_grad_()}, {'concentration': torch.randn(4).exp().requires_grad_()}, ]), Example(Exponential, [ {'rate': torch.randn(5, 5).abs().requires_grad_()}, {'rate': torch.randn(1).abs().requires_grad_()}, ]), Example(FisherSnedecor, [ { 'df1': torch.randn(5, 5).abs().requires_grad_(), 'df2': torch.randn(5, 5).abs().requires_grad_(), }, { 'df1': torch.randn(1).abs().requires_grad_(), 'df2': torch.randn(1).abs().requires_grad_(), }, { 'df1': torch.tensor([1.0]), 'df2': 1.0, } ]), Example(Gamma, [ { 'concentration': torch.randn(2, 3).exp().requires_grad_(), 'rate': torch.randn(2, 3).exp().requires_grad_(), }, { 'concentration': torch.randn(1).exp().requires_grad_(), 'rate': torch.randn(1).exp().requires_grad_(), }, ]), Example(Gumbel, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, ]), Example(HalfCauchy, [ {'scale': 1.0}, {'scale': torch.tensor([[1.0], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.randn(5, 5).abs().requires_grad_()}, {'scale': torch.randn(1).abs().requires_grad_()}, {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)} ]), Example(Independent, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 0, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 1, }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 2, }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'reinterpreted_batch_ndims': 3, }, ]), Example(Kumaraswamy, [ { 'concentration1': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), 'concentration0': torch.empty(2, 3).uniform_(1, 2).requires_grad_(), }, { 'concentration1': torch.rand(4).uniform_(1, 2).requires_grad_(), 'concentration0': torch.rand(4).uniform_(1, 2).requires_grad_(), }, ]), Example(LKJCholesky, [ { 'dim': 2, 'concentration': 0.5 }, { 'dim': 3, 
'concentration': torch.tensor([0.5, 1., 2.]), }, { 'dim': 100, 'concentration': 4. }, ]), Example(Laplace, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LogisticNormal, [ { 'loc': torch.randn(5, 5).requires_grad_(), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1).requires_grad_(), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(LowRankMultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'cov_factor': torch.randn(5, 2, 1, requires_grad=True), 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True), }, { 'loc': torch.randn(4, 3, requires_grad=True), 'cov_factor': torch.randn(3, 2, requires_grad=True), 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True), } ]), Example(MultivariateNormal, [ { 'loc': torch.randn(5, 2, requires_grad=True), 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), }, { 'loc': torch.randn(2, 3, requires_grad=True), 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), }, { 'loc': torch.randn(5, 3, 2, requires_grad=True), 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), }, { 'loc': torch.tensor([1.0, -1.0]), 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), }, ]), Example(Normal, [ { 'loc': torch.randn(5, 5, requires_grad=True), 'scale': torch.randn(5, 5).abs().requires_grad_(), }, { 'loc': torch.randn(1, requires_grad=True), 'scale': torch.randn(1).abs().requires_grad_(), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)}, {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 1.0, 'alpha': 1.0 }, { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'alpha': torch.randn(5, 5).abs().requires_grad_() }, { 'scale': torch.tensor([1.0]), 'alpha': 1.0 } ]), Example(Poisson, [ { 'rate': torch.randn(5, 5).abs().requires_grad_(), }, { 'rate': torch.randn(3).abs().requires_grad_(), }, { 'rate': 0.2, }, { 'rate': torch.tensor([0.0], requires_grad=True), }, { 'rate': 0.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': 
torch.tensor([2.0]), 'probs': torch.tensor([0.3]), }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0]) } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]]) }, { 'temperature': torch.tensor([7.2]), 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': [], }, { 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True), torch.randn(2, 3).abs().requires_grad_()), 'transforms': ExpTransform(), }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)), ExpTransform()], }, { 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True), torch.randn(2, 3, 5).abs().requires_grad_()), 'transforms': AffineTransform(1, 2), }, { 'base_distribution': Uniform(torch.tensor(1e8).log(), torch.tensor(1e10).log()), 'transforms': ExpTransform(), }, ]), Example(Uniform, [ { 'low': torch.zeros(5, 5, requires_grad=True), 'high': torch.ones(5, 5, requires_grad=True), }, { 'low': torch.zeros(1, requires_grad=True), 'high': torch.ones(1, requires_grad=True), }, { 'low': torch.tensor([1.0, 1.0], requires_grad=True), 'high': torch.tensor([2.0, 3.0], requires_grad=True), }, ]), Example(Weibull, [ { 'scale': torch.randn(5, 5).abs().requires_grad_(), 'concentration': torch.randn(1).abs().requires_grad_() } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0], [0.1, 0.25, 0.0], [0.0, 0.0, 0.3]], requires_grad=True), 'df': torch.tensor([5., 4], requires_grad=True), }, { 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]], [[2.0, 0.0], [0.3, 0.25]], [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True), 'df': torch.tensor([5., 3.5, 3], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': torch.tensor([3.0]), }, { 'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]), 'df': 3.0, }, ]), Example(MixtureSameFamily, [ { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': Normal(torch.randn(5, requires_grad=True), torch.rand(5, requires_grad=True)), }, { 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)), 'component_distribution': MultivariateNormal( loc=torch.randn(5, 2, requires_grad=True), covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)), }, ]), Example(VonMises, [ { 'loc': torch.tensor(1.0, requires_grad=True), 'concentration': torch.tensor(10.0, requires_grad=True) }, { 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True), 'concentration': torch.tensor([1.0, 10.0], requires_grad=True) }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([0.3], requires_grad=True)}, {'probs': 0.3}, {'logits': torch.tensor([0.], requires_grad=True)}, ]) ] BAD_EXAMPLES = [ Example(Bernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, 
]), Example(Beta, [ { 'concentration1': torch.tensor([0.0], requires_grad=True), 'concentration0': torch.tensor([0.0], requires_grad=True), }, { 'concentration1': torch.tensor([-1.0], requires_grad=True), 'concentration0': torch.tensor([-2.0], requires_grad=True), }, ]), Example(Geometric, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.3], requires_grad=True)}, {'probs': 1.00000001}, ]), Example(Categorical, [ {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)}, ]), Example(Binomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(NegativeBinomial, [ {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10}, {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True), 'total_count': 10}, ]), Example(Cauchy, [ {'loc': 0.0, 'scale': -1.0}, {'loc': torch.tensor([0.0]), 'scale': 0.0}, {'loc': torch.tensor([[0.0], [-2.0]]), 'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(Chi2, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(StudentT, [ {'df': torch.tensor([0.], requires_grad=True)}, {'df': torch.tensor([-2.], requires_grad=True)}, ]), Example(Dirichlet, [ {'concentration': torch.tensor([0.], requires_grad=True)}, {'concentration': torch.tensor([-2.], requires_grad=True)} ]), Example(Exponential, [ {'rate': torch.tensor([0., 0.], requires_grad=True)}, {'rate': torch.tensor([-2.], requires_grad=True)} ]), Example(FisherSnedecor, [ { 'df1': torch.tensor([0., 0.], requires_grad=True), 'df2': torch.tensor([-1., -100.], requires_grad=True), }, { 'df1': torch.tensor([1., 1.], requires_grad=True), 'df2': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gamma, [ { 'concentration': torch.tensor([0., 0.], requires_grad=True), 'rate': torch.tensor([-1., -100.], requires_grad=True), }, { 'concentration': torch.tensor([1., 1.], requires_grad=True), 'rate': torch.tensor([0., 0.], requires_grad=True), } ]), Example(Gumbel, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(HalfCauchy, [ {'scale': -1.0}, {'scale': 0.0}, {'scale': torch.tensor([[-0.000001], [1.0]])} ]), Example(HalfNormal, [ {'scale': torch.tensor([0., 1.], requires_grad=True)}, {'scale': torch.tensor([1., -1.], requires_grad=True)}, ]), Example(LKJCholesky, [ { 'dim': -2, 'concentration': 0.1 }, { 'dim': 1, 'concentration': 2., }, { 'dim': 2, 'concentration': 0., }, ]), Example(Laplace, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(LogNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, ]), Example(MultivariateNormal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), }, ]), 
Example(Normal, [ { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([0., 1.], requires_grad=True), }, { 'loc': torch.tensor([1., 1.], requires_grad=True), 'scale': torch.tensor([1., -1.], requires_grad=True), }, { 'loc': torch.tensor([1.0, 0.0], requires_grad=True), 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True), }, ]), Example(OneHotCategorical, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(OneHotCategoricalStraightThrough, [ {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)}, {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)}, ]), Example(Pareto, [ { 'scale': 0.0, 'alpha': 0.0 }, { 'scale': torch.tensor([0.0, 0.0], requires_grad=True), 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0]), 'alpha': -1.0 } ]), Example(Poisson, [ { 'rate': torch.tensor([-0.1], requires_grad=True), }, { 'rate': -1.0, } ]), Example(RelaxedBernoulli, [ { 'temperature': torch.tensor([1.5], requires_grad=True), 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True), }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([-1.0]), } ]), Example(RelaxedOneHotCategorical, [ { 'temperature': torch.tensor([0.5], requires_grad=True), 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True) }, { 'temperature': torch.tensor([2.0]), 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]]) } ]), Example(TransformedDistribution, [ { 'base_distribution': Normal(0, 1), 'transforms': lambda x: x, }, { 'base_distribution': Normal(0, 1), 'transforms': [lambda x: x], }, ]), Example(Uniform, [ { 'low': torch.tensor([2.0], requires_grad=True), 'high': torch.tensor([2.0], requires_grad=True), }, { 'low': torch.tensor([0.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), }, { 'low': torch.tensor([1.0], requires_grad=True), 'high': torch.tensor([0.0], requires_grad=True), } ]), Example(Weibull, [ { 'scale': torch.tensor([0.0], requires_grad=True), 'concentration': torch.tensor([0.0], requires_grad=True) }, { 'scale': torch.tensor([1.0], requires_grad=True), 'concentration': torch.tensor([-1.0], requires_grad=True) } ]), Example(Wishart, [ { 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True), 'df': torch.tensor([1.5], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': torch.tensor([3.], requires_grad=True), }, { 'covariance_matrix': torch.tensor([[1.0, 1.0], [1.0, -2.0]], requires_grad=True), 'df': 3., }, ]), Example(ContinuousBernoulli, [ {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)}, {'probs': torch.tensor([-0.5], requires_grad=True)}, {'probs': 1.00001}, ]) ] class TestJit(DistributionsTestCase):
import math import numbers import unittest from collections import namedtuple from itertools import product from random import shuffle from packaging import version import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan from torch.autograd import grad from torch.autograd.functional import jacobian from torch.distributions import ( Bernoulli, Beta, Binomial, Categorical, Cauchy, Chi2, constraints, ContinuousBernoulli, Dirichlet, Distribution, Exponential, ExponentialFamily, FisherSnedecor, Gamma, Geometric, Gumbel, HalfCauchy, HalfNormal, Independent, InverseGamma, kl_divergence, Kumaraswamy, Laplace, LKJCholesky, LogisticNormal, LogNormal, LowRankMultivariateNormal, MixtureSameFamily, Multinomial, MultivariateNormal, NegativeBinomial, Normal, OneHotCategorical, OneHotCategoricalStraightThrough, Pareto, Poisson, RelaxedBernoulli, RelaxedOneHotCategorical, StudentT, TransformedDistribution, Uniform, VonMises, Weibull, Wishart, ) from torch.distributions.constraint_registry import transform_to from torch.distributions.constraints import Constraint, is_dependent from torch.distributions.dirichlet import _Dirichlet_backward from torch.distributions.kl import _kl_expfamily_expfamily from torch.distributions.transforms import ( AffineTransform, CatTransform, ExpTransform, identity_transform, StackTransform, ) from torch.distributions.utils import ( lazy_property, probs_to_logits, tril_matrix_to_vec, vec_to_tril_matrix, ) from torch.nn.functional import softmax from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import ( gradcheck, load_tests, run_tests, set_default_dtype, set_rng_seed, skipIfTorchDynamo, TestCase, ) load_tests = load_tests TEST_NUMPY = True import numpy as np import scipy.special import scipy.stats Example = namedtuple("Example", ["Dist", "params"]) class TestJit(DistributionsTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
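The test_log_prob record above exercises torch.jit.trace over a function that rebuilds a distribution from its parameters and scores a sample. A minimal standalone sketch of that pattern (not taken from the test suite; the Normal parameterization here is assumed purely for illustration):

import torch
from torch.distributions import Normal


def f(sample, loc, scale):
    # Rebuild the distribution from its parameters and score the sample.
    return Normal(loc, scale).log_prob(sample)


loc = torch.randn(3)
scale = torch.randn(3).abs() + 0.1
sample = Normal(loc, scale).sample()

# Trace once on example inputs, then reuse the traced function on fresh data.
traced_f = torch.jit.trace(f, (sample, loc, scale))
new_sample = Normal(loc, scale).sample()
assert torch.allclose(traced_f(new_sample, loc, scale), f(new_sample, loc, scale))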
torch
test/dynamo/test_activation_checkpointing.py
forward
def forward(self, x): return torch.sigmoid(self.linear(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class MockModule(torch.nn.Module): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
test_dynamo_does_not_trace_getattr_as_top_frame
def test_dynamo_does_not_trace_getattr_as_top_frame(self): # inline_inbuilt_nn_modules is a proxy to emulate what FSDP tests do. from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) cnt = CompileCounterWithBackend("eager") lin = torch.nn.Linear(1, 1) mod = torch.nn.Sequential(lin, lin) mod = CheckpointWrapper(mod) mod._checkpoint_wrapped_module.a = torch.ones(1, 1) def fn(x): return mod(x) * mod.a opt_fn = torch.compile(fn, backend=cnt, fullgraph=True) x = torch.randn(1, 1) self.assertEqual(opt_fn(x), fn(x))
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase): from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_activation_checkpointing.py
fn
def fn(x, y): return torch.utils.checkpoint.checkpoint( gn, torch.sin(x), y, use_reentrant=True ) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) fw_compiler = functools.partial(count_ops, freq=1, op=torch.ops.aten.mm.default) bw_compiler = functools.partial( count_ops, freq=3, op=torch.ops.aten.mm.default ) # mm recomputed in the bwd backend = aot_autograd(fw_compiler=fw_compiler, bw_compiler=bw_compiler) self._validate(fn, backend, x, y)
import copy import functools import math import unittest # noqa: F811 from importlib import import_module import torch import torch._dynamo.config import torch._dynamo.test_case import torch._functorch.config import torch.distributed as dist import torch.nn as nn import torch.utils.checkpoint from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd from torch._dynamo.testing import CompileCounterWithBackend from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_CUDNN_ATTENTION, SM90OrLater, ) from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.inductor_utils import HAS_CUDA from torch.testing._internal.two_tensor import TwoTensor from torch.utils.checkpoint import ( checkpoint, CheckpointPolicy, create_selective_checkpoint_contexts, ) requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") requires_distributed = functools.partial( unittest.skipIf, not dist.is_available(), "requires distributed" ) from torch._inductor import compile_fx from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( checkpoint_wrapper as dist_checkpoint_wrapper, ) from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointWrapper, ) from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
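The checkpointing record above wraps an inner function with torch.utils.checkpoint.checkpoint(..., use_reentrant=True) so its activations are recomputed during backward instead of being saved. A minimal CPU sketch of that call pattern; gn here is a stand-in matmul-plus-sigmoid, not the helper defined in the test file:

import torch
import torch.utils.checkpoint


def gn(x, y):
    return torch.sigmoid(torch.matmul(x, y))


def fn(x, y):
    # The inner matmul's activations are recomputed in backward rather than saved.
    return torch.utils.checkpoint.checkpoint(gn, torch.sin(x), y, use_reentrant=True)


x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 4, requires_grad=True)
fn(x, y).sum().backward()
print(x.grad.shape, y.grad.shape)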
torch
test/dynamo/test_after_aot.py
strip_trailing_whitespace
def strip_trailing_whitespace(r): return "\n".join([l.rstrip() for l in r.split("\n")]) class TestAfterAot(torch._dynamo.test_case.TestCase): @unittest.skipIf(IS_FBCODE, "NotImplementedError") def test_save_graph_repro(self): # TODO: This triggers CUDA context initialization, even though # it is CPU only buf = io.StringIO() args = [torch.randn(4)] def f(x): return (x * x,) gm = make_fx(f)(*args) with tempfile.TemporaryDirectory() as d: save_graph_repro(buf, gm, args, "inductor_accuracy", save_dir=d) r = buf.getvalue() with report_compile_source_on_error(): exec(r, {"__compile_source__": r}) shutil.rmtree(os.path.join(d, "storages")) # Should still work even without the save dir with report_compile_source_on_error(): exec(r, {"__compile_source__": r}) @unittest.skipIf(sys.byteorder != "little", "checksum depends on endianness") def test_dump_tensor(self): def test(tensor, expected): with tempfile.TemporaryDirectory() as d: writer = InputWriter(d, stable_hash=True) writer.tensor("x", tensor) self.assertExpectedInline("\n".join(writer._lines), expected, skip=1) reader = InputReader(d) env = {"reader": reader, "torch": torch} # TODO: assert no logs exec("\n".join(writer._lines), env) self.assertEqual(reader.args[0], tensor) test( torch.zeros(3, 4), """\ buf0 = reader.storage('c17fd92682ca5b304ac71074b558dda9e8eb4d66', 48) reader.tensor(buf0, (3, 4), is_leaf=True) # x""", ) test( torch.ones(3, 4, dtype=torch.int32), """\ buf0 = reader.storage('7c221e2da0c58c700cc2996644dd13d042bd552e', 48, dtype_hint=torch.int32) reader.tensor(buf0, (3, 4), dtype=torch.int32, is_leaf=True) # x""", ) test( torch.empty((3, 4, 5, 6), memory_format=torch.channels_last).fill_(2), """\ buf0 = reader.storage('49ebab3961d6221e64c4c72b0aefd976bdd2afc4', 1440) reader.tensor(buf0, (3, 4, 5, 6), (120, 1, 24, 4), is_leaf=True) # x""", ) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
import io import os import shutil import sys import tempfile import unittest import torch._dynamo.test_case from torch._dynamo.repro.after_aot import InputReader, InputWriter, save_graph_repro from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_utils import IS_FBCODE from torch.utils._traceback import report_compile_source_on_error from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_after_aot.py
f
def f(x): return (x * x,) gm = make_fx(f)(*args) with tempfile.TemporaryDirectory() as d: save_graph_repro(buf, gm, args, "inductor_accuracy", save_dir=d) r = buf.getvalue() with report_compile_source_on_error(): exec(r, {"__compile_source__": r}) shutil.rmtree(os.path.join(d, "storages")) # Should still work even without the save dir with report_compile_source_on_error(): exec(r, {"__compile_source__": r})
import io import os import shutil import sys import tempfile import unittest import torch._dynamo.test_case from torch._dynamo.repro.after_aot import InputReader, InputWriter, save_graph_repro from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_utils import IS_FBCODE from torch.utils._traceback import report_compile_source_on_error from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
maybe_dupe_op
def maybe_dupe_op(x): y = x + 1 z = x + 2 if x.numel() < 5: return y, y else: return y, z aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") lib.define("maybe_dupe_op(Tensor a) -> (Tensor, Tensor)") lib.impl("maybe_dupe_op", maybe_dupe_op, "CPU") lib.impl("maybe_dupe_op", maybe_dupe_op, "Meta") # this is just dealing with the fact that # aot_module_simplified expects submods to always return tuples/lists class WrapperModule(torch.nn.Module): def __init__(self, mod): super().__init__() self.mod = mod def forward(self, *args): out = self.mod(*args) if isinstance(out, (list, tuple)): return out return (out,)
def maybe_dupe_op(x): y = x + 1 z = x + 2 if x.numel() < 5: return y, y else: return y, z
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
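The maybe_dupe_op record registers a custom operator through torch.library; the same registration from the record's before/context fields, reflowed into runnable form:

import torch


def maybe_dupe_op(x):
    y = x + 1
    z = x + 2
    if x.numel() < 5:
        return y, y
    else:
        return y, z


lib = torch.library.Library("custom", "DEF")  # noqa: TOR901
lib.define("maybe_dupe_op(Tensor a) -> (Tensor, Tensor)")
lib.impl("maybe_dupe_op", maybe_dupe_op, "CPU")
lib.impl("maybe_dupe_op", maybe_dupe_op, "Meta")

a, b = torch.ops.custom.maybe_dupe_op(torch.ones(3))  # numel < 5, so both outputs are y
print(a, b)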
torch
test/dynamo/test_aot_autograd.py
test_LSTM
def test_LSTM(self): # https://github.com/pytorch/torchdynamo/issues/1147 class Repro(torch.nn.Module): def __init__(self): super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True ) def forward(self, permute: torch.Tensor): self_mod_model_lstm_lstm = self.self_mod_model_lstm_lstm(permute) return (self_mod_model_lstm_lstm,) mod = Repro() aot_mod = torch._dynamo.optimize("aot_eager")(mod) args = [((92, 4, 64), (1, 5888, 92), torch.float32, "cpu", False)] args = [ rand_strided(sh, st, dt, dev).requires_grad_(rg) for (sh, st, dt, dev, rg) in args ] eager_result = mod(*args) aot_result = aot_mod(*args) self.assertTrue(torch._dynamo.testing.same(eager_result, aot_result))
def test_LSTM(self): # https://github.com/pytorch/torchdynamo/issues/1147 class Repro(torch.nn.Module): def __init__(self) -> None: super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True ) def forward(self, permute: torch.Tensor): self_mod_model_lstm_lstm = self.self_mod_model_lstm_lstm(permute) return (self_mod_model_lstm_lstm,) mod = Repro() aot_mod = torch._dynamo.optimize("aot_eager")(mod) args = [((92, 4, 64), (1, 5888, 92), torch.float32, "cpu", False)] args = [ rand_strided(sh, st, dt, dev).requires_grad_(rg) for (sh, st, dt, dev, rg) in args ] eager_result = mod(*args) aot_result = aot_mod(*args) self.assertTrue(torch._dynamo.testing.same(eager_result, aot_result))
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
guard_fail_fn
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertEqual( failure_reason, "tensor 'y' requires_grad mismatch. expected requires_grad=1", ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertIn( """tensor 'L['y']' requires_grad mismatch. expected requires_grad=1""", failure_reason, ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
guard_fail_fn
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertEqual( failure_reason, "tensor 'y' requires_grad mismatch. expected requires_grad=1", ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertIn( """tensor 'L['y']' requires_grad mismatch. expected requires_grad=1""", failure_reason, ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
test_call_fn_with_non_const_inputs_aot_unsafe
def test_call_fn_with_non_const_inputs_aot_unsafe(self): class ModuleSpecialFwd(torch.nn.Module): def _some_bad_fwd(self, param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y def forward(self, x, y): return self._some_bad_fwd(x, y) # Init mod mod = ModuleSpecialFwd() x = torch.nn.Parameter(torch.randn(4)) y = torch.randn([4]) # Run it for real real = mod(x, y) # Run it in export graph, _ = torch._dynamo.export(mod, x, y) # Assert equal self.assertTrue(torch._dynamo.testing.same(real, graph(x, y))) # Run exported graph with AOT aot_fn = torch._dynamo.optimize("aot_eager")(graph) with self.assertRaisesRegex( RuntimeError, "a leaf Variable that requires grad is being used in an in-place operation.", ): aot_fn(x, y)
def test_call_fn_with_non_const_inputs_aot_unsafe(self): class ModuleSpecialFwd(torch.nn.Module): def _some_bad_fwd(self, param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y def forward(self, x, y): return self._some_bad_fwd(x, y) # Init mod mod = ModuleSpecialFwd() x = torch.nn.Parameter(torch.randn(4)) y = torch.randn([4]) # Run it for real real = mod(x, y) # Run it in export graph, _ = torch._dynamo.export(mod)(x, y) # Assert equal self.assertTrue(torch._dynamo.testing.same(real, graph(x, y))) # Run exported graph with AOT aot_fn = torch._dynamo.optimize("aot_eager")(graph) # This should not error: we mutated an autograd leaf under no_grad mode. aot_fn(x, y)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
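The diff above captures two changes: torch._dynamo.export moved to a curried calling convention, export(mod)(x, y), and the in-place parameter mutation under no_grad is no longer expected to raise. A minimal sketch of the newer export call, assuming a build that provides the curried form; the Add module is a hypothetical stand-in:

import torch
import torch._dynamo


class Add(torch.nn.Module):
    def forward(self, x, y):
        return (x + y,)


mod = Add()
x, y = torch.randn(4), torch.randn(4)

# Curried form: export(mod) returns a callable that traces when given example inputs.
graph, guards = torch._dynamo.export(mod)(x, y)
assert torch.allclose(graph(x, y)[0], mod(x, y)[0])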
torch
test/dynamo/test_aot_autograd.py
test_requires_grad_fake_via_dynamo_recompiles
def test_requires_grad_fake_via_dynamo_recompiles(self): class F(torch.nn.Module): def forward(self, x, y): return (x + y,) x = torch.randn(3, 3, requires_grad=True) y = torch.randn(3, 3, requires_grad=True) z = torch.randn(3, 3, requires_grad=False) cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") failure_reason = None def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertEqual( failure_reason, "tensor 'y' requires_grad mismatch. expected requires_grad=1", ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
def test_requires_grad_fake_via_dynamo_recompiles(self): class F(torch.nn.Module): def forward(self, x, y): return (x + y,) x = torch.randn(3, 3, requires_grad=True) y = torch.randn(3, 3, requires_grad=True) z = torch.randn(3, 3, requires_grad=False) cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") failure_reason = None def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertIn( """tensor 'L['y']' requires_grad mismatch. expected requires_grad=1""", failure_reason, ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
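The guard_fail_fn records and the test above track a change in Dynamo's guard-failure messages: tensor sources are now rendered as L['y'] instead of a bare variable name, so the assertions switch from assertEqual to assertIn. A minimal sketch of the inspect-the-failure-reason pattern, assuming the aot_eager backend guards on requires_grad as it does in the test:

import torch
import torch._dynamo

failure_reason = None


def guard_fail_fn(failure):
    # failure[0] is the human-readable reason, e.g. a requires_grad mismatch on L['y'].
    global failure_reason
    failure_reason = failure[0]


def f(x, y):
    return x + y


opt_f = torch._dynamo.optimize("aot_eager", guard_fail_fn=guard_fail_fn)(f)
x = torch.randn(3, 3, requires_grad=True)
opt_f(x, torch.randn(3, 3, requires_grad=True))   # first compile
opt_f(x, torch.randn(3, 3, requires_grad=False))  # requires_grad guard fails, recompiles
print(failure_reason)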
torch
test/dynamo/test_aot_autograd.py
__init__
def __init__(self): super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True )
def __init__(self) -> None: super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True )
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class Repro(torch.nn.Module): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class Repro(torch.nn.Module): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
test_mutation
def test_mutation(self): # https://github.com/pytorch/torchdynamo/issues/1301 def fn(param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y y = torch.randn(4) x = torch.nn.Parameter(torch.randn(4)) aot_fn = torch._dynamo.optimize("aot_eager")(fn) with self.assertRaisesRegex( RuntimeError, "a leaf Variable that requires grad is being used in an in-place operation.", ): aot_fn(x, y)
def test_mutation(self): # https://github.com/pytorch/torchdynamo/issues/1301 def fn(param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y y = torch.randn(4) x = torch.nn.Parameter(torch.randn(4)) aot_fn = torch._dynamo.optimize("aot_eager")(fn) # This should not error: we mutated an autograd leaf under no_grad mode. aot_fn(x, y)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
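The test_mutation row above records a behavior change: the old test expected `RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.`, while the updated test expects the aot_eager-compiled call to succeed. The eager-mode rule the new expectation mirrors is that in-place mutation of a leaf Parameter is allowed once grad mode is disabled. A minimal eager-only sketch of that rule (independent of the compiled test in the row):

```python
import torch

param = torch.nn.Parameter(torch.randn(4))
y = torch.randn(4)

# param.add_(y) here, with grad mode on, would raise:
# "a leaf Variable that requires grad is being used in an in-place operation."
with torch.no_grad():       # grad mode off, so the in-place op on a leaf is allowed
    param.add_(y)

print(param.requires_grad)  # still True; only the underlying data was updated
```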
torch
test/dynamo/test_aot_autograd.py
fn
def fn(param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y y = torch.randn(4) x = torch.nn.Parameter(torch.randn(4)) aot_fn = torch._dynamo.optimize("aot_eager")(fn) with self.assertRaisesRegex( RuntimeError, "a leaf Variable that requires grad is being used in an in-place operation.", ): aot_fn(x, y)
def fn(param, y): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(False) param.add_(y) finally: torch.set_grad_enabled(prev_grad) return y y = torch.randn(4) x = torch.nn.Parameter(torch.randn(4)) aot_fn = torch._dynamo.optimize("aot_eager")(fn) # This should not error: we mutated an autograd leaf under no_grad mode. aot_fn(x, y)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
test_call_fn_with_non_const_inputs_aot_safe
def test_call_fn_with_non_const_inputs_aot_safe(self): class ModuleSpecialFwd(torch.nn.Module): def __init__(self): super().__init__() self.conv = torch.nn.Conv2d( in_channels=3, out_channels=20, kernel_size=(5, 5) ) def _conv_forward(self, x): return self.conv._conv_forward(x, self.conv.weight, self.conv.bias) def forward(self, x): return self._conv_forward(x) # Init mod mod = ModuleSpecialFwd() rx = torch.randn([3, 10, 10]) # Run it for real real = mod(rx) # Run it in export graph, _ = torch._dynamo.export(mod, rx) # Run exported graph with AOT self.assertTrue(torch._dynamo.testing.same(real, graph(rx))) aot_fn = torch._dynamo.optimize("aot_eager")(graph) aot_fn(rx)
def test_call_fn_with_non_const_inputs_aot_safe(self): class ModuleSpecialFwd(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d( in_channels=3, out_channels=20, kernel_size=(5, 5) ) def _conv_forward(self, x): return self.conv._conv_forward(x, self.conv.weight, self.conv.bias) def forward(self, x): return self._conv_forward(x) # Init mod mod = ModuleSpecialFwd() rx = torch.randn([3, 10, 10]) # Run it for real real = mod(rx) # Run it in export graph, _ = torch._dynamo.export(mod)(rx) # Run exported graph with AOT self.assertTrue(torch._dynamo.testing.same(real, graph(rx))) aot_fn = torch._dynamo.optimize("aot_eager")(graph) aot_fn(rx)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
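Besides the `-> None` annotation on `__init__`, the row above switches `torch._dynamo.export(mod, rx)` to the two-step form `torch._dynamo.export(mod)(rx)`. A minimal sketch of that call convention, assuming a torch version where `export` returns a callable that takes the example inputs (as in the updated test):

```python
import torch
import torch._dynamo


def f(x):
    return x.sin() + 1


x = torch.randn(3)

# New-style export: wrap the callable first, then pass the example inputs.
graph_module, guards = torch._dynamo.export(f)(x)

print(torch.allclose(f(x), graph_module(x)))  # exported graph reproduces the eager output
```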
torch
test/dynamo/test_aot_autograd.py
__init__
def __init__(self): super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True )
def __init__(self) -> None: super().__init__() self.self_mod_model_lstm_lstm = torch.nn.LSTM( 64, 64, num_layers=2, bidirectional=True )
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads class Repro(torch.nn.Module): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class Repro(torch.nn.Module): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd.py
guard_fail_fn
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertEqual( failure_reason, "tensor 'y' requires_grad mismatch. expected requires_grad=1", ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
def guard_fail_fn(failure): nonlocal failure_reason failure_reason = failure[0] fxy = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxy, (x, y)) compare_equal_outs_and_grads(self, F(), fxy, (x, z)) self.assertIn( """tensor 'L['y']' requires_grad mismatch. expected requires_grad=1""", failure_reason, ) # Reset failure reason failure_reason = None self.assertEqual(cc.frame_count, 2) torch._dynamo.reset() # for new backend cc = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fxz = torch._dynamo.optimize(cc, guard_fail_fn=guard_fail_fn)(F()) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) compare_equal_outs_and_grads(self, F(), fxz, (x, z)) self.assertEqual(cc.frame_count, 1) self.assertTrue(failure_reason is None)
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
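The guard_fail_fn rows record only a message-format update: the guard source is now reported as `L['y']` rather than `y`, and the check is relaxed from `assertEqual` to `assertIn`. A self-contained sketch of the capture pattern those rows use, with a plain function standing in for the test's `F()` module (the exact failure string depends on the torch version):

```python
import torch
import torch._dynamo

failure_reason = None


def guard_fail_fn(failure):
    global failure_reason
    failure_reason = failure[0]   # first element is the human-readable guard-failure reason


def f(x, y):
    return x + y


opt_f = torch._dynamo.optimize("eager", guard_fail_fn=guard_fail_fn)(f)

x = torch.randn(3)
opt_f(x, torch.randn(3, requires_grad=True))  # first compile: guards on y.requires_grad
opt_f(x, torch.randn(3))                      # requires_grad flipped -> guard fails, recompiles
print(failure_reason)                         # mentions a requires_grad mismatch for y
```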
torch
test/dynamo/test_aot_autograd.py
backward
def backward(ctx, grad): (x,) = ctx.saved_tensors return x, grad
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class Test(torch.autograd.Function): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
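The `backward` row above is marked `added`, and its surrounding context shows only `class Test(torch.autograd.Function)`; the matching `forward` is not part of the row. A runnable sketch of the same custom-Function pattern, with a hypothetical `forward` chosen only so the snippet executes (the returned gradients mirror the added row rather than any analytic formula):

```python
import torch


class Test(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, y):
        # Hypothetical forward: the dataset row records only the added backward.
        ctx.save_for_backward(x)
        return x * y

    @staticmethod
    def backward(ctx, grad):
        (x,) = ctx.saved_tensors
        return x, grad          # same return shape as the added row: one grad per input


x = torch.randn(3, requires_grad=True)
y = torch.randn(3, requires_grad=True)
Test.apply(x, y).sum().backward()
print(x.grad, y.grad)           # x.grad holds x's values; y.grad is all ones
```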
torch
test/dynamo/test_aot_autograd.py
maybe_dupe_op
def maybe_dupe_op(x): y = x + 1 z = x + 2 if x.numel() < 5: return y, y else: return y, z aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") lib.define("maybe_dupe_op(Tensor a) -> (Tensor, Tensor)") lib.impl("maybe_dupe_op", maybe_dupe_op, "CPU") lib.impl("maybe_dupe_op", maybe_dupe_op, "Meta") # this is just dealing with the fact that # aot_module_simplified expects submods to always return tuples/lists class WrapperModule(torch.nn.Module): def __init__(self, mod): super().__init__() self.mod = mod def forward(self, *args): out = self.mod(*args) if isinstance(out, (list, tuple)): return out return (out,)
def maybe_dupe_op(x): y = x + 1 z = x + 2 if x.numel() < 5: return y, y else: return y, z
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
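This row trims `maybe_dupe_op` down to the bare function: the inline `torch.library` registration and the `WrapperModule` helper present in the "before" field are dropped from the "after" field. A standalone sketch of that registration pattern, using a hypothetical `custom_demo` namespace so it does not clash with the `custom` library that appears in the surrounding contexts:

```python
import torch


def maybe_dupe_op(x):
    y = x + 1
    z = x + 2
    if x.numel() < 5:
        return y, y
    else:
        return y, z


lib = torch.library.Library("custom_demo", "DEF")  # hypothetical namespace for this sketch
lib.define("maybe_dupe_op(Tensor a) -> (Tensor, Tensor)")
lib.impl("maybe_dupe_op", maybe_dupe_op, "CPU")
lib.impl("maybe_dupe_op", maybe_dupe_op, "Meta")

a, b = torch.ops.custom_demo.maybe_dupe_op(torch.ones(3))
print(a, b)   # numel < 5, so the op returns the "+ 1" result twice
```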
torch
test/dynamo/test_aot_autograd.py
compile_submod
def compile_submod(input_mod, args): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified class WrapperModule(torch.nn.Module): def __init__(self): super().__init__() self.original = input_mod self.submod = aot_module_simplified(input_mod, args, nop) def forward(self, *args): return self.submod(*args) return WrapperModule()
def compile_submod(input_mod, args): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified class WrapperModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.original = input_mod self.submod = aot_module_simplified(input_mod, args, nop) def forward(self, *args): return self.submod(*args) return WrapperModule()
from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case from torch._dynamo.testing import CompileCounter, rand_strided from torch.testing._internal.common_utils import compare_equal_outs_and_grads from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._dynamo.test_case import run_tests
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
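`compile_submod` wraps each submodule with `aot_module_simplified` and the `nop` compiler; only the `-> None` annotation changed in this row. A minimal sketch of that wrapping on its own, assuming the positional signature used in the row (`aot_module_simplified(mod, example_args, fw_compiler)`) and returning a tuple from the submodule, per the comment preserved in the maybe_dupe_op "before" field above:

```python
import torch
from functorch.compile import nop
from torch._functorch.aot_autograd import aot_module_simplified


class TupleOut(torch.nn.Module):
    """Toy submodule; AOT expects the wrapped module to return a tuple/list."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return (self.linear(x),)


example_inputs = [torch.randn(2, 4)]
gm = torch.fx.symbolic_trace(TupleOut())       # dynamo normally hands over an fx.GraphModule

# nop leaves the traced forward/backward graphs unchanged and simply runs them.
compiled = aot_module_simplified(gm, example_inputs, nop)

out = compiled(*example_inputs)
print(out[0].shape)   # same result as the eager module, routed through AOT Autograd
```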
torch
test/dynamo/test_aot_autograd.py
test_nn_parameter_construction
def test_nn_parameter_construction(self): # https://github.com/pytorch/pytorch/issues/99569 def fn(x): y = x.sin() z = torch.nn.Parameter(torch.ones(1)) return y + z x = torch.rand((4, 4)) opt_fn = torch._dynamo.optimize("aot_eager")(fn) self.assertTrue(torch._dynamo.testing.same(fn(x), opt_fn(x)))
import copy import re import unittest from textwrap import dedent from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.test_case import torch.fx.traceback as fx_traceback import torch.utils._pytree as pytree from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided from torch._functorch.aot_autograd import _aot_export_function, create_functional_call from torch._subclasses.fake_tensor import FakeTensorMode from torch.fx.experimental.proxy_tensor import make_fx from torch.profiler import profile from torch.testing import FileCheck from torch.testing._internal.common_utils import compare_equal_outs_and_grads aten = torch.ops.aten lib = torch.library.Library("custom", "DEF") # noqa: TOR901 class AotAutogradFallbackTests(torch._dynamo.test_case.TestCase): from functorch.compile import nop from torch._functorch.aot_autograd import aot_module_simplified from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks from torch._functorch.aot_autograd import aot_export_joint_simple import torch._functorch.config as _config from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/dynamo/test_aot_autograd.py
fn
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)
with self.assertRaisesRegex(
    RuntimeError,
    "a leaf Variable that requires grad is being used in an in-place operation.",
):
    aot_fn(x, y)
def fn(param, y):
    prev_grad = torch.is_grad_enabled()
    try:
        torch.set_grad_enabled(False)
        param.add_(y)
    finally:
        torch.set_grad_enabled(prev_grad)
    return y

y = torch.randn(4)
x = torch.nn.Parameter(torch.randn(4))
aot_fn = torch._dynamo.optimize("aot_eager")(fn)
# This should not error: we mutated an autograd leaf under no_grad mode.
aot_fn(x, y)
from unittest.mock import patch

import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._dynamo.testing import CompileCounter, rand_strided
from torch.testing._internal.common_utils import compare_equal_outs_and_grads

from functorch.compile import nop
from torch._functorch.aot_autograd import aot_module_simplified

from torch._dynamo.test_case import run_tests
import copy
import re
import unittest
from textwrap import dedent
from unittest.mock import patch

import torch
import torch._dynamo
import torch._dynamo.test_case
import torch.fx.traceback as fx_traceback
import torch.utils._pytree as pytree
from torch._dynamo.testing import CompileCounter, expectedFailureDynamic, rand_strided
from torch._functorch.aot_autograd import _aot_export_function, create_functional_call
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
from torch.profiler import profile
from torch.testing import FileCheck
from torch.testing._internal.common_utils import compare_equal_outs_and_grads

aten = torch.ops.aten
lib = torch.library.Library("custom", "DEF")  # noqa: TOR901

from functorch.compile import nop
from torch._functorch.aot_autograd import aot_module_simplified
from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks
from torch._functorch.aot_autograd import aot_export_joint_simple
import torch._functorch.config as _config

from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/dynamo/test_aot_autograd_cache.py
fn
def fn(x, y):
    return (x * 2, y @ y)

a = torch.rand(25)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn, backend="inductor")

# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)

# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self._clear_dynamo_and_codecache()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 1)
import os
import unittest
from unittest.mock import patch

import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch._aot_autograd
from torch._dynamo import config as dynamo_config
from torch._dynamo.utils import counters
from torch._functorch import config as functorch_config
from torch._functorch._aot_autograd.autograd_cache import (
    AOTAutogradCache,
    autograd_cache_key,
    BypassAOTAutogradCache,
)
from torch._functorch._aot_autograd.schemas import AOTConfig
from torch._inductor import config as inductor_config
from torch._inductor.test_case import TestCase as InductorTestCase
from torch.testing._internal.common_cuda import SM80OrLater
from torch.testing._internal.common_device_type import largeTensorTest
from torch.testing._internal.common_utils import (
    instantiate_parametrized_tests,
    parametrize,
    skipIfWindows,
)
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU

from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added