diff --git a/.gitattributes b/.gitattributes
index 1975a839c05b3a1031b234f600b5f288db3b2d59..4c88823817ca5ed4de8764b41639477fcb5bf102 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1858,3 +1858,4 @@ infer_4_30_0/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpyth
 infer_4_30_0/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/shapely/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+infer_4_30_0/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55fd281348c332283a62ab4526b10d37c1d1cb84
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5e283350bce58edae771c8894d5e7d9c650c410a4dea435e2b7881987f61bac
+size 101055
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75b7bea4212230ea3240ac35bc81f17bf83cc24f
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7ef5676814ede47f688a4951423d4f3da878afc
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23d22b8e2cf5152c814413dd932415c334fe2d2f
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ef8a051fc064da154aef8aa88b7c4f6404f9411
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1335d9a9a4f94ab8797a8bd202122a5ebc0cb9a
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e562ca771393a7615dd57f02382083f6df8ca3b
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c294020c281861b30528db31a53bced2bb958e0
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34e614daba8771a662d78ea65f0d6f8cadcf2039
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8f491434352943f36e5b2e073b0f80296641271
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54155dfd3456f8e1ed703787db79614a2d3b5fbd
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..145fbaefb8b21903dc34ca73ede19300990a0a96
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22647ab6991b722b0d4c9036ed8eaf048320818a
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_mkldnn.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff9b086ee1854713100c3ddc0fdfb3d13e637ebd
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e162632a4d01bc1144b94cd4ca466a4c696147d3
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_optimizers.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ea0b12526bddb95607f0155efca6c82a7c8bfd3
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7df32fd8734ba8f818970a5125cbdc7fdaa216d
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d346ed6c00fe1908d4977e9cb4c710cdfd425f6c
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b2034ee4bed900fe09742ff619c9c5873bd732b
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4810c201d33f222b10732d62f6e8590de134b5e8
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3f68c17273f2891f0debbed9e26032a3e8c897e
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_tensor.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0036005264b1b00b508ecde17ca58f0279eef3b7
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20418b603b4cba240b265790ad3da0dbccb24729
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dynamo_test_failures.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hop_db.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hop_db.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..717b7479fe6d0b91c10b151edf80f40b320c7dfd
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hop_db.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31f431f4342a8cb5862778a8806962534bbd359a
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce78945aff94d97d4b6274f17f0a546293b883fd
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be9cc610a319e49fed7f18ca6f4a01e4f34b7a3d
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e85d4c4cb90570392bbaf54b3e9cefe2fdf6444
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b310d469e08df026261e3a7db5f436089cf45a57
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6193688d46230ffaccea6f7025754ed24173d010
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f2d9301a8c7510f65d74ca2de1bb1b59ee8f93d
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..420fd31085dc2eff20c4b63779a171c75e4aa943
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/static_module.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/torchbind_impls.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/torchbind_impls.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45f684d0f09b8f8917ead6c384a29d2aab68ef7a
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/torchbind_impls.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beb8d6e24243bd700d5681844238d3864f710ecd
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5da944d9f60766a6a5c3b0d4a99bf2e98c951db
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e3572cfc4c6a0ddc3d8fa2e1b056415204acdfa
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py
@@ -0,0 +1 @@
+# mypy: ignore-errors
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08fbdab64e545717e6168552f27884f7d9875bea
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f31831250eebc74adb677882e1d95d60c62ca9e0
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network1.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef29021b481db8b5075fddbd33a9ac97ea47c63d
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/__pycache__/network2.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py
new file mode 100644
index 0000000000000000000000000000000000000000..8755643a78cca80668988df9e9db3de75778b5db
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network1.py
@@ -0,0 +1,10 @@
+# mypy: ignore-errors
+
+import torch.nn as nn
+
+
+class Net(nn.Module):
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.linear = nn.Linear(10, 20)
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py
new file mode 100644
index 0000000000000000000000000000000000000000..19b0b8ee53d3b530aa33978c7a13da4e5fee4ebd
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/data/network2.py
@@ -0,0 +1,11 @@
+# mypy: ignore-errors
+
+import torch.nn as nn
+
+
+class Net(nn.Module):
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.linear = nn.Linear(10, 20)
+        self.relu = nn.ReLU()
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1bec95c08a89dec1fdc9d322c935933b67b15ae4
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/__pycache__/ddp_under_dist_autograd_test.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9d5be31472266ee7ddb286d943fab79eb8a417e
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54a4c9ab2b0659c52e337bb68406f635dcdb6abe
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__pycache__/test_common.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dacbc577e686bb22b3c4fd87106fbe54ad2ca9c3
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59a9fd72211200340a068ab81cc0ec2919c54745
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..163c1f2023283955a07d80df07cd71b755d923d6
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/_tensor/__pycache__/common_dtensor.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4697067d33a8a12fbe748145e5871c5957ac930b
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa7f271e97d5fa89c015afa0779021fd991bf800
Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/remote_module_test.cpython-310.pyc differ
diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee3a374c745df8175bf515626896a48c1b2b78d4
--- /dev/null
+++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/remote_module_test.py
@@ -0,0 +1,734 @@
+# mypy: allow-untyped-defs
+
+import enum
+from typing import Tuple
+
+import torch
+import torch.distributed.rpc as rpc
+import torch.testing._internal.dist_utils as dist_utils
+from torch import Tensor, nn
+from torch._jit_internal import Future
+from torch.distributed.nn import RemoteModule
+from torch.distributed.nn.api.remote_module import _REMOTE_MODULE_PICKLED_ATTRIBUTES
+from torch.distributed.nn.api.remote_module import _RemoteModule
+from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
+from torch.testing._internal.common_utils import TemporaryFileName
+from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
+    RpcAgentTestFixture,
+)
+
+
+_PARAM_VAL = torch.nn.Parameter(torch.ones(1))
+
+
+# RPC handler for querying the device on the destination worker.
+def remote_device(module_rref):
+    for param in module_rref.local_value().parameters():
+        return param.device
+
+
+# RPC handler for querying __dict__ on the destination worker.
+def remote_module_attributes(remote_module):
+    return remote_module.__dict__
+
+
+# RPC handler for running forward on the destination worker.
+def remote_forward(remote_module, args):
+    return remote_module.forward(*args)
+
+
+# RPC handler for running forward_async on the destination worker.
+def remote_forward_async(remote_module, args):
+    # Since future cannot be pickled and sent over the RPC layer,
+    # have to wait and behave just like ``forward_sync``.
+    return remote_module.forward_async(*args).wait()
+
+
+# RPC handler for getting training mode on the destination worker.
+def get_remote_training_arg(module_rref):
+    return module_rref.local_value().training
+
+
+class ModuleCreationMode(enum.Enum):
+    MODULE_CTOR_WITH_INTERFACE = "module_ctor_with_interface"
+    MODULE_CTOR = "module_ctor"
+
+
+@torch.jit.interface
+class MyModuleInterface:
+    def forward(
+        self, tensor: Tensor, number: int, word: str = "default"
+    ) -> Tuple[str, int, Tensor]:
+        # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
+        pass
+
+
+@torch.jit.interface
+class RemoteMyModuleInterface:
+    def forward(
+        self, tensor: Tensor, number: int, word: str = "default"
+    ) -> Tuple[str, int, Tensor]:
+        # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
+        pass
+
+    def forward_async(
+        self, tensor: Tensor, number: int, word: str = "default"
+    ) -> Future[Tuple[str, int, Tensor]]:
+        pass
+
+
+class MyModule(nn.Module):
+    def __init__(self, first_arg, first_kwarg=-1):
+        super().__init__()
+        self.param1 = _PARAM_VAL
+
+    def forward(
+        self, tensor: Tensor, number: int, word: str = "default"
+    ) -> Tuple[str, int, Tensor]:
+        return word, number, tensor
+
+
+class BadModule:
+    def __init__(self, first_arg, first_kwarg=-1):
+        pass
+
+
+def create_scripted_module(first_arg, first_kwarg=-1):
+    module = MyModule(first_arg, first_kwarg=first_kwarg)
+    scripted_module = torch.jit.script(module)
+    return scripted_module
+
+
+# Common utils for both CPU and CUDA test suites
+class CommonRemoteModuleTest(RpcAgentTestFixture):
+    @property
+    def world_size(self):  # Override setting in RpcAgentTestFixture
+        return 2
+
+    @staticmethod
+    def _create_remote_module_iter(remote_device, modes=None):
+        if modes is None:
+            modes = ModuleCreationMode.__members__.values()
+
+        args = (1,)
+        kwargs = dict(first_kwarg=2)
+
+        if ModuleCreationMode.MODULE_CTOR in modes:
+            remote_module = RemoteModule(remote_device, MyModule, args, kwargs)
+            yield remote_module
+
+        if ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE in modes:
+            remote_module = _RemoteModule(
+                remote_device,
+                create_scripted_module,
+                args,
+                kwargs,
+                _module_interface_cls=MyModuleInterface,
+            )
+            scripted_remote_module = torch.jit.script(remote_module)
+            yield scripted_remote_module
+
+
+class RemoteModuleTest(CommonRemoteModuleTest):
+    @dist_utils.dist_init
+    def test_bad_module(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        remote_device = f"{dst_worker_name}/cpu"
+        args = (1,)
+        kwargs = dict(first_kwarg=2)
+
+        with self.assertRaisesRegex(
+            ValueError,
+            r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
+        ):
+            RemoteModule(remote_device, BadModule, args, kwargs).forward()
+
+        with self.assertRaisesRegex(
+            ValueError,
+            r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
+        ):
+            RemoteModule(remote_device, BadModule, args, kwargs).forward()
+
+
+    @dist_utils.dist_init
+    def test_forward_async(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        args = (torch.ones(1), 2, "3")
+        for remote_module in self._create_remote_module_iter(dst_worker_name):
+            ret_fut = remote_module.forward_async(*args)
+            ret = ret_fut.wait()
+            self.assertEqual(ret, tuple(reversed(args)))
+
+    @dist_utils.dist_init
+    def test_forward_async_script(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        scripted_remote_module = next(
+            self._create_remote_module_iter(
+                dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
+            )
+        )
+
+        @torch.jit.script
+        def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
+            ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
+            ret = ret_fut.wait()
+            return ret
+
+        ret = run_forward_async(scripted_remote_module)
+
+        self.assertEqual(ret, ("3", 2, torch.ones(1)))
+
+    @dist_utils.dist_init
+    def test_forward_sync(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        args = (torch.ones(1), 2, "3")
+        for remote_module in self._create_remote_module_iter(dst_worker_name):
+            ret = remote_module.forward(*args)
+            self.assertEqual(ret, tuple(reversed(args)))
+
+    @dist_utils.dist_init
+    def test_forward_sync_script(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        scripted_remote_module = next(
+            self._create_remote_module_iter(
+                dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
+            )
+        )
+
+        @torch.jit.script
+        def run_forward(scripted_remote_module: MyModuleInterface):
+            ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
+            return ret
+
+        ret = run_forward(scripted_remote_module)
+
+        self.assertEqual(ret, ("3", 2, torch.ones(1)))
+
+    @dist_utils.dist_init
+    def test_forward_with_kwargs(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        args = (torch.ones(1), 2)
+        kwargs = dict(word="3")
+        # Only test Python nn.Module, because script module methods don't support taking kwargs.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            ret_fut = remote_module.forward_async(*args, **kwargs)
+            ret = ret_fut.wait()
+            self.assertEqual(ret, tuple(reversed(args + ("3",))))
+
+            ret = remote_module.forward(*args, **kwargs)
+            self.assertEqual(ret, tuple(reversed(args + ("3",))))
+
+    @dist_utils.dist_init
+    def test_remote_parameters(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        # Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            param_rrefs = remote_module.remote_parameters()
+            self.assertEqual(len(param_rrefs), 1)
+            self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
+
+    @dist_utils.dist_init
+    def test_get_module_rref(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        # Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            rref = remote_module.get_module_rref()
+            self.assertEqual(rref, remote_module.module_rref)
+            for param in rref.to_here().parameters():
+                self.assertTrue(torch.equal(param, _PARAM_VAL))
+
+    @dist_utils.dist_init
+    def test_train_eval(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            remote_module.train()
+            ret1 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
+            self.assertEqual(ret1, True)
+
+            remote_module.eval()
+            ret2 = rpc.rpc_sync(dst_worker_name, get_remote_training_arg, args=(remote_module.get_module_rref(),))
+            self.assertEqual(ret2, False)
+
+    @dist_utils.dist_init
+    def test_unsupported_methods(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
+            ):
+                remote_module.register_buffer("buffer", torch.ones(5))
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``register_parameter`` not supported for RemoteModule",
+            ):
+                remote_module.register_parameter(
+                    "param", torch.nn.Parameter(torch.ones(1))
+                )
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``add_module`` not supported for RemoteModule"
+            ):
+                remote_module.add_module("empty", None)
+
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``apply`` not supported for RemoteModule"
+            ):
+                fn = torch.rand((3, 3), requires_grad=False)
+                remote_module.apply(fn)
+
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``cuda`` not supported for RemoteModule"
+            ):
+                remote_module.cuda()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``cpu`` not supported for RemoteModule"
+            ):
+                remote_module.cpu()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``type`` not supported for RemoteModule"
+            ):
+                remote_module.type(torch.FloatTensor)
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``float`` not supported for RemoteModule"
+            ):
+                remote_module.float()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``double`` not supported for RemoteModule"
+            ):
+                remote_module.double()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
+            ):
+                remote_module.bfloat16()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``to`` not supported for RemoteModule"
+            ):
+                remote_module.to("cpu", dtype=torch.int32)
+
+            def hook(module, grad_input, grad_output):
+                pass
+
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``register_backward_hook`` not supported for RemoteModule",
+            ):
+                remote_module.register_backward_hook(hook)
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
+            ):
+                remote_module.register_forward_pre_hook(hook)
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``register_forward_hook`` not supported for RemoteModule",
+            ):
+                remote_module.register_forward_hook(hook)
+
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``state_dict`` not supported for RemoteModule"
+            ):
+                remote_module.state_dict()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
+            ):
+                remote_module.load_state_dict({})
+
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
+            ):
+                remote_module.parameters()
+            with self.assertRaisesRegex(
+                ValueError,
+                r"Method ``named_parameters`` not supported for RemoteModule",
+            ):
+                remote_module.named_parameters()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``buffers`` not supported for RemoteModule"
+            ):
+                remote_module.buffers()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
+            ):
+                remote_module.named_buffers()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``children`` not supported for RemoteModule"
+            ):
+                remote_module.children()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``named_children`` not supported for RemoteModule"
+            ):
+                remote_module.named_children()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``modules`` not supported for RemoteModule"
+            ):
+                remote_module.modules()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``named_modules`` not supported for RemoteModule"
+            ):
+                remote_module.named_modules()
+
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
+            ):
+                remote_module.requires_grad_()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
+            ):
+                remote_module.zero_grad()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``share_memory`` not supported for RemoteModule"
+            ):
+                remote_module.share_memory()
+            with self.assertRaisesRegex(
+                ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
+            ):
+                remote_module.extra_repr()
+
+    @dist_utils.dist_init
+    def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        # If a new attribute is added to this RemoteModule after the initialization,
+        # and it will be sent over the wire by RPC,
+        # this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
+        # Note that adding a new attribute out of constructor should rarely happen.
+        # If a new attribute is added to RemoteModule constructor,
+        # there is a sanity check to enforce developers to add this attribute to either
+        # _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            new_attr_name = "new_attr"
+            setattr(remote_module, new_attr_name, 1)
+
+            attrs = rpc.rpc_sync(
+                dst_worker_name, remote_module_attributes, (remote_module,)
+            )
+            self.assertNotIn(new_attr_name, attrs)
+
+    @dist_utils.dist_init
+    def test_remote_module_py_pickle_not_supported(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            with TemporaryFileName() as fname:
+                with self.assertRaisesRegex(
+                    RuntimeError,
+                    "Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
+                ):
+                    torch.save(remote_module, fname)
+
+    @dist_utils.dist_init
+    def test_remote_module_py_pickle_not_supported_script(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        for remote_module in self._create_remote_module_iter(
+            dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
+        ):
+            with TemporaryFileName() as fname:
+                with self.assertRaisesRegex(torch.jit.Error, "can only be pickled when using RPC"):
+                    torch.save(remote_module, fname)
+
+
+class ThreeWorkersRemoteModuleTest(CommonRemoteModuleTest):
+    @property
+    def world_size(self):  # Override setting in CommonRemoteModuleTest
+        return 3
+
+    @dist_utils.dist_init
+    def test_send_remote_module_over_the_wire(self):
+        if self.rank != 0:
+            return
+        dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
+
+        # Unpickled attributes include both the inherent attributes of RemoteModule
+        # (not inherited from the superclass) and two installed methods.
+        expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
+        expected_unpickled_attrs.append("forward_async")
+        expected_unpickled_attrs.append("forward")
+
+        # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            # Test querying some simple attributes from worker2.
+            attrs = rpc.rpc_sync(
+                dst_worker2_name, remote_module_attributes, (remote_module,)
+            )
+            self.assertListEqual(list(attrs.keys()), expected_unpickled_attrs)
+            self.assertEqual(attrs["on"], "worker1")
+            self.assertEqual(attrs["device"], "cpu")
+            self.assertFalse(attrs["is_device_map_set"])
+            self.assertFalse(attrs["is_scriptable"])
+
+            # Test that the methods installed on worker1 can be initiated by worker2 over the RPC layer.
+            # NOTE: In practice a remote module should be directly stored on the worker that runs ``forward`` or ``forward_async``,
+            # not have another worker to initiate forward over the RPC layer.
+            args = (torch.ones(1), 2, "3")
+            ret1 = rpc.rpc_sync(dst_worker2_name, remote_forward, (remote_module, args))
+            self.assertEqual(ret1, tuple(reversed(args)))
+            ret2 = rpc.rpc_sync(
+                dst_worker2_name, remote_forward_async, (remote_module, args)
+            )
+            self.assertEqual(ret2, tuple(reversed(args)))
+
+    @dist_utils.dist_init
+    def test_send_remote_module_over_the_wire_script_not_supported(self):
+        if self.rank != 0:
+            return
+        dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
+
+        # Unpickled attributes include both the inherent attributes of RemoteModule
+        # (not inherited from the superclass) and two installed methods.
+        expected_unpickled_attrs = list(_REMOTE_MODULE_PICKLED_ATTRIBUTES)
+        expected_unpickled_attrs.append("forward_async")
+        expected_unpickled_attrs.append("forward")
+
+        with self.assertRaisesRegex(
+            RuntimeError, "Passing a script RemoteModule over RPC is not supported."
+        ):
+            # Create a remote module on worker1 and then pass it to worker2 over the RPC layer.
+            for remote_module in self._create_remote_module_iter(
+                dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
+            ):
+                # Test querying some simple attributes from worker2.
+                attrs = rpc.rpc_sync(
+                    dst_worker2_name, remote_module_attributes, (remote_module,)
+                )
+
+    @dist_utils.dist_init
+    def test_create_remote_module_from_module_rref(self):
+        if self.rank != 0:
+            return
+        dst_worker1_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+        dst_worker2_name = dist_utils.worker_name((self.rank + 2) % self.world_size)
+
+        # Create a remote module on worker1 and then pass its `module_rref` to worker2 over the RPC layer.
+        for remote_module in self._create_remote_module_iter(
+            dst_worker1_name, modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            remote_module2 = rpc.rpc_sync(
+                dst_worker2_name,
+                RemoteModule.init_from_module_rref,
+                (dst_worker2_name, remote_module.get_module_rref()),
+            )
+
+            args = (torch.ones(1), 2, "3")
+            ret1 = rpc.rpc_sync(
+                dst_worker1_name, remote_forward, (remote_module, args)
+            )
+            ret2 = rpc.rpc_sync(
+                dst_worker2_name, remote_forward, (remote_module2, args)
+            )
+            self.assertEqual(ret1, ret2)
+
+
+class CudaRemoteModuleTest(CommonRemoteModuleTest):
+    @skip_if_lt_x_gpu(1)
+    @dist_utils.dist_init
+    def test_valid_device(self):
+        if self.rank != 0:
+            return
+        dst_rank = (self.rank + 1) % self.world_size
+        dst_worker_name = dist_utils.worker_name(dst_rank)
+
+        for remote_module in self._create_remote_module_iter(
+            f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            device = rpc.rpc_sync(
+                dst_worker_name, remote_device, (remote_module.module_rref,)
+            )
+            self.assertEqual(device.type, "cuda")
+            self.assertEqual(device.index, 0)
+
+        # Test rank works as well.
+        for remote_module in self._create_remote_module_iter(
+            f"rank:{dst_rank}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            device = rpc.rpc_sync(
+                dst_worker_name, remote_device, (remote_module.module_rref,)
+            )
+            self.assertEqual(device.type, "cuda")
+            self.assertEqual(device.index, 0)
+
+    @skip_if_lt_x_gpu(1)
+    @dist_utils.dist_init
+    def test_invalid_devices(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        with self.assertRaisesRegex(
+            RuntimeError,
+            r"Expected one of .+ device type at start of device string",
+        ):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    f"{dst_worker_name}/foo",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(
+            RuntimeError, r"CUDA error: invalid device ordinal"
+        ):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    f"{dst_worker_name}/cuda:100",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(RuntimeError, r"Invalid device string: 'cpu2'"):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    f"{dst_worker_name}/cpu2",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(RuntimeError, r"Device string must not be empty"):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    f"{dst_worker_name}/",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(
+            ValueError,
+            r"Could not parse remote_device: worker1/cuda:0/cuda:1. The valid format is '<workername>/<device>'",
+        ):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    f"{dst_worker_name}/cuda:0/cuda:1",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(
+            ValueError,
+            r"Could not parse remote_device: /. The valid format is '<workername>/<device>'",
+        ):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    "/",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+        with self.assertRaisesRegex(
+            ValueError,
+            r"Could not parse remote_device: /cuda:0. The valid format is '<workername>/<device>'",
+        ):
+            [
+                m.forward()
+                for m in self._create_remote_module_iter(
+                    "/cuda:0",
+                    modes=[ModuleCreationMode.MODULE_CTOR],
+                )
+            ]
+
+    @skip_if_lt_x_gpu(1)
+    @dist_utils.dist_init
+    def test_input_moved_to_cuda_device(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        # These two CPU tensors (in args and kwargs) should be implicitly moved to an appropriate cuda device.
+        t1 = torch.ones(1)
+        args = (t1, 2)
+        t2 = t1 * 2
+        kwargs = dict(word=t2)
+
+        # Only test Python nn.Module, because script module methods don't support taking kwargs.
+        for remote_module in self._create_remote_module_iter(
+            f"{dst_worker_name}/cuda:0", modes=[ModuleCreationMode.MODULE_CTOR]
+        ):
+            ret_fut = remote_module.forward_async(*args, **kwargs)
+            ret = ret_fut.wait()
+            self.assertEqual(ret, tuple(reversed(args + (t2,))))
+            # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+            self.assertEqual(ret[0].device.type, "cpu")
+            self.assertEqual(ret[2].device.type, "cpu")
+
+            ret = remote_module.forward(*args, **kwargs)
+            self.assertEqual(ret, tuple(reversed(args + (t2,))))
+            # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+            self.assertEqual(ret[0].device.type, "cpu")
+            self.assertEqual(ret[2].device.type, "cpu")
+
+    @skip_if_lt_x_gpu(1)
+    @dist_utils.dist_init
+    def test_input_moved_to_cuda_device_script(self):
+        if self.rank != 0:
+            return
+        dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
+
+        scripted_remote_module = next(
+            self._create_remote_module_iter(
+                f"{dst_worker_name}/cuda:0",
+                modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE],
+            )
+        )
+
+        @torch.jit.script
+        def run_forward(scripted_remote_module: MyModuleInterface):
+            ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
+            return ret
+
+        ret = run_forward(scripted_remote_module)
+
+        self.assertEqual(ret, ("3", 2, torch.ones(1)))
+        # TODO: Once the RPC backend can support directly sending GPU tensors, the expected device type should be "cuda:0".
+ self.assertEqual(ret[2].device.type, "cpu") diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ffccbd6c7a014ed50da8da39020474245dacca5 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..094cb89ef26a10483a0df75cc3645be6e076fc0a Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_autograd_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81ba602967cf60caa3aa13645ae68d8dcc8377d6 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/dist_optimizer_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee28aef1e2e34f5d1c72779dc9092ce8098104d Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_agent_rpc_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c3ab8476f14751f631178aa1e38ae12a655ee22 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/faulty_rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6727997efd5b6fa36542ad002337a22cde407956 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_agent_test_fixture.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/dist_autograd_test.cpython-310.pyc 
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/dist_autograd_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b9db1da1b480c1b8fabf1b6d9935f7bd405142f Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/dist_autograd_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd47f1f38e325d575f6ccc6b141e24f3959d28a4 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e9125ba0ebe7e0623a12ad1a1cd7eeb7d2749a3a --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py @@ -0,0 +1,7 @@ +# mypy: ignore-errors + +from .make_fx import make_fx_check +from .aot_autograd import aot_autograd_check, _test_aot_autograd_forwards_backwards_helper +from .fake_tensor import fake_check +from .autograd_registration import autograd_registration_check +from .generate_tests import generate_opcheck_tests, opcheck, OpCheckError, dontGenerateOpCheckTests, is_inside_opcheck_mode diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0316e5a54098187f70a11057777a97b97d71041 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..760506dff2c0ac4a3bc58433458d779e9b2ff9bc Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/aot_autograd.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c8ca43a77deafb9ff2648ad9cfbf8c5ce4f7165 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/autograd_registration.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a88023e385284ed73bc26880c77a5ffb82d43586 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/fake_tensor.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62745b3187f33ee5661c8bfcca2482409bbb9532 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/make_fx.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..a5552e23c8a4674b9cdfaa94dc37b658645957a5 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py @@ -0,0 +1,148 @@ +# mypy: ignore-errors + +import torch +import torch.utils._pytree as pytree +from torch.testing._utils import wrapper_set_seed +from functorch.compile import compiled_function, min_cut_rematerialization_partition, nop +from .make_fx import randomize +import re + + +class assert_raises_regex: + def __init__(self, exception_cls, regex): + self.exception_cls = exception_cls + self.regex = regex + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, traceback): + if exc_type == self.exception_cls: + msg = str(exc_val) + if not re.search(self.regex, msg): + raise AssertionError( + f"Expected exception to match regex. regex: {self.regex}, exception: {msg}") + return True # Squashes the exception + if exc_type is not None: + raise AssertionError( + f"Expected {self.exception_cls} to be raised, instead got exception {exc_type}") + raise AssertionError("Expected exception to be raised but none was") + + +def aot_autograd_check( + func, + args, + kwargs, + dynamic, + assert_raises_regex_fn=assert_raises_regex, + assert_equals_fn=torch.testing._comparison.assert_close, + check_gradients=True, + try_check_data_specialization=False): + """Compares the behavior of func(*args, **kwargs) in eager-mode PyTorch with its behavior under AOTAutograd. + + Compares outputs and (if check_gradients=True) gradients produced by + AOTAutograd against eager-mode PyTorch. + + We assume that func(*args, **kwargs) succeeds in eager-mode PyTorch.
+ + """ + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + args_is_tensor = [isinstance(arg, torch.Tensor) for arg in flat_args] + args = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + + # We construct a new function that only accepts Tensors as inputs + def func_no_tensors(args): + reconstructed_flat_args = [] + args = iter(args) + for v in flat_args: + if isinstance(v, torch.Tensor): + reconstructed_flat_args.append(next(args)) + else: + reconstructed_flat_args.append(v) + + c_args, c_kwargs = pytree.tree_unflatten(reconstructed_flat_args, args_spec) + return func(*c_args, **c_kwargs) + + compiled_f = compiled_function( + func_no_tensors, nop, nop, dynamic=dynamic, partition_fn=min_cut_rematerialization_partition) + + out = wrapper_set_seed(func_no_tensors, args) + if check_gradients == "auto": + any_tensor_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, args) + any_output_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, out) + check_gradients = any_tensor_requires_grad and any_output_requires_grad + if not check_gradients: + compiled_out = wrapper_set_seed(compiled_f, args) + assert_equals_fn(compiled_out, out, msg=outputs_msg) + return + _test_aot_autograd_forwards_backwards_helper( + func_no_tensors, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization) + +outputs_msg = ( + "Outputs of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect output " + "underneath torch.compile. This could be because the operator's " + "implementation not traceable or that there is a bug in AOTAutograd." +) + + +def _test_aot_autograd_forwards_backwards_helper( + f, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization): + # Verify grads are equal between compiled and non-compiled versions of f. + + def call_forwards_backwards(f, args): + flat_args = pytree.arg_tree_leaves(*args) + diff_args = [arg for arg in flat_args if isinstance(arg, torch.Tensor) and + arg.requires_grad] + out = wrapper_set_seed(f, args) + flat_out = pytree.tree_leaves(out) + + sm = 0 + for i in flat_out: + if isinstance(i, torch.Tensor): + # We need to call .abs() because it is possible that the output of the + # operator is a complex Tensor and autograd will yell at autograd.grad + # on a complex Tensor unless we manually provide the grad_output flag. + sm += i.sum().abs() + assert isinstance(sm, torch.Tensor) + return out, torch.autograd.grad(sm, diff_args, allow_unused=True) + + def check(args, ignore_failure=False): + try: + orig_out, orig_grad = call_forwards_backwards(f, args) + except Exception: + if ignore_failure: + return + raise + + # See https://github.com/pytorch/pytorch/pull/98960#issuecomment-1505962215 + tensor_args = [x for x in pytree.tree_flatten(args)[0] if isinstance(x, torch.Tensor)] + any_non_leaves = any(x.grad_fn is not None for x in tensor_args) + if all(x is None for x in orig_grad) and any_non_leaves: + with assert_raises_regex_fn(RuntimeError, 'does not require grad and does not have a grad_fn'): + call_forwards_backwards(compiled_f, args) + return + + msg = ( + "Gradients of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect gradients " + "underneath torch.compile. This could be because the operator's " + "backward is incorrectly registered or not traceable or that there " + "is a bug in AOTAutograd." 
+ ) + + compiled_out, compiled_grad = call_forwards_backwards(compiled_f, args) + assert_equals_fn(compiled_out, orig_out, msg=outputs_msg) + assert_equals_fn(compiled_grad, orig_grad, msg=msg) + + check(args, ignore_failure=False) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. + # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore if this test fails. + if try_check_data_specialization: + args = randomize(args) + check(args, ignore_failure=True) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py new file mode 100644 index 0000000000000000000000000000000000000000..25df4f1d03fcf50fc5869d8af93cd128f98e9c72 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py @@ -0,0 +1,132 @@ +# mypy: ignore-errors + +import contextlib + +import torch +import torch.utils._pytree as pytree + + +@contextlib.contextmanager +def set_autograd_fallback_mode(mode): + prev = torch._C._get_autograd_fallback_mode() + try: + torch._C._set_autograd_fallback_mode(mode) + yield + finally: + torch._C._set_autograd_fallback_mode(prev) + + +def autograd_registration_check(op, args, kwargs): + """Check whether autograd was registered correctly for the operator. + + Operators should have "autograd support" registered directly to an + autograd dispatch key. + An incorrect registration may silently produce incorrect results. + Note that this check won't catch all problems but will catch + the most common ones. + + Example usage: + >>> x = torch.randn(3, requires_grad=True) + >>> autograd_registration_check(torch.ops.aten.sin.default, (x,), {}) + + Here are some best practices if you do find your autograd is + registered incorrectly: + - If the operator is composite (i.e. consists of other PyTorch ops) + and you wish the operator to decompose and get autograd support + that way, then please register the implementation to + DispatchKey::CompositeImplicitAutograd + - If you're adding an autograd formula for the operator, the correct + thing to do is to register an autograd.Function to + DispatchKey::Autograd (preferred) or one of the backend-specific + DispatchKey::Autograd<BACKEND> keys (e.g. AutogradCPU). It is NOT OK to register + an autograd.Function to a backend (e.g. CPU/CUDA) key. + - If your operator is non-differentiable, then you should register + an implementation to the Autograd key that uses + AutoDispatchBelowAutograd and re-invokes the operator. + + """ + assert isinstance(op, torch._ops.OpOverload) + # Implementation details + # ----------------------------------------------- + # If an operator doesn't have an autograd kernel at an autograd key, + # and the operator does not return inputs as-is, then all of + # the outputs should have requires_grad=False before we apply + # special behaviors of our default autograd fallback. + # (The default autograd fallback may set requires_grad=True on output + # tensors in certain modes so that when they are backpropped through, + # they raise an error).
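+ # (The "nothing" fallback mode used below gives us exactly that baseline, + # since it leaves the requires_grad-ness of outputs untouched.)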
+ # + # Our strategy for detecting if an operator doesn't have an autograd + # kernel at the autograd key is: + # - set the autograd fallback mode to "nothing" (so it does not change + # the required-gradness of outputs) + # - run the operator + # - check if any outputs of the operator (that are not inputs) require + # grad. This would only happen if the user calls regular PyTorch + # operations in their backend key (this op should instead be + # CompositeImplicitAutograd or not an op) or if the user invokes + # an autograd.Function in the backend key. + # + # Note that it's already likely a bug if the operator directly returns + # an input as output (because custom ops don't have a good way of + # constructing true in-place or out variants), but we defer that + # responsibility to a different test (schema_check). + + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + if not any(t.requires_grad for t in all_tensors): + raise RuntimeError( + "autograd_registration_check: no inputs have requires_grad=True so " + "we are unable to actually perform this test. Please pass inputs " + "that do require grad." + ) + + # Determine which Autograd<BACKEND> key to check + all_device_types = {arg.device.type for arg in all_tensors} + if not all_device_types.issubset(["cpu", "cuda"]): + # Don't want to support other keys yet + raise NotImplementedError( + f"autograd_registration_check: NYI devices other than CPU/CUDA, got {all_device_types}" + ) + if "cuda" in all_device_types: + key = "AutogradCUDA" + elif "cpu" in all_device_types: + key = "AutogradCPU" + + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), key): + return + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), "Autograd"): + return + if torch._C._dispatch_has_kernel_for_dispatch_key( + op.name(), "CompositeImplicitAutograd" + ): + return + + # At this point, we know the operator doesn't have a kernel registered to an + # autograd key. Let's proceed with our test. + with set_autograd_fallback_mode("nothing"): + all_outs = op(*args, **kwargs) + + inp_ids = {id(arg) for arg in flat_args} + + def not_an_input_and_requires_grad(tensor): + if not tensor.requires_grad: + return False + if id(tensor) in inp_ids: + return False + return True + + if not pytree.tree_any_only(torch.Tensor, not_an_input_and_requires_grad, all_outs): + return + + raise AssertionError( + f"{op.name()}: at least one output of this operator has requires_grad=True " + f"but the operator does not have an autograd kernel defined at an autograd " + f"key (e.g. DispatchKey::Autograd). This could mean that you have " + f"incorrectly registered an autograd kernel to a non-Autograd DispatchKey, " + f"which may lead to silently incorrect results. If your operator consists " + f"of regular PyTorch operations, consider not using an operator at all " + f"or registering your operator as CompositeImplicitAutograd. If you have " + f"an autograd.Function registered to a backend (CPU/CUDA) key, the correct " + f"location for it is the Autograd key."
+ ) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..5e60f50189b5dc3ab43fdd97120d5fa23559a84e --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py @@ -0,0 +1,12 @@ +# mypy: ignore-errors + +import torch._subclasses + + +def is_builtin(op): + return op.namespace in ('aten', 'prims', 'prim') + + +def fake_check(op, args, kwargs): + with torch._subclasses.CrossRefFakeMode(ignore_op_fn=is_builtin): + op(*args, **kwargs) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..7fac1e57c6ac8e13c0a0f7c3f53592d91ce28bbf --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py @@ -0,0 +1,825 @@ +# mypy: ignore-errors + +import datetime +import difflib +import functools +import inspect +import json +import os +import re +import tempfile +import threading +import unittest +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch._dynamo +import torch.utils._pytree as pytree +from torch._dynamo.utils import clone_input +from torch._library.custom_ops import CustomOpDef +from torch._subclasses.schema_check_mode import SchemaCheckMode +from torch._utils_internal import get_file_path_2 +from torch.overrides import TorchFunctionMode +from torch.testing._internal.optests import ( + aot_autograd_check, + autograd_registration_check, + fake_check, +) + + +def dontGenerateOpCheckTests(reason: str): + def inner(fun): + fun._torch_dont_generate_opcheck_tests = True + return fun + + return inner + + +def is_abstract(tensor: torch.Tensor) -> bool: + if tensor.is_meta: + return True + if torch._subclasses.fake_tensor.is_fake(tensor): + return True + return False + + +def safe_schema_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> Any: + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + with SchemaCheckMode(): + result = op(*args, **kwargs) + return result + + +def safe_autograd_registration_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + # Don't perform autograd_registration_check if none of the inputs require grad. 
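+ # (autograd_registration_check itself raises a RuntimeError when no input + # requires grad, so there would be nothing meaningful to test.)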
+ if not pytree.tree_any_only( + torch.Tensor, lambda x: x.requires_grad, (args, kwargs) + ): + return + return autograd_registration_check(op, args, kwargs) + + +def safe_fake_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + return fake_check(op, args, kwargs) + + +def safe_aot_autograd_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + dynamic: bool, + *, + copy_inputs: bool = True, +) -> Any: + # NB: copy_inputs does nothing for aot_autograd_check: it always needs to copy + # inputs. + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + + def func(*args, **kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, torch.clone, (args, kwargs)) + return op(*args, **kwargs) + + # aot_autograd_check runs func(*args, **kwargs) multiple times + # and assumes `func` does not modify its inputs. + return aot_autograd_check(func, args, kwargs, dynamic, check_gradients="auto") + + +def deepcopy_tensors(inputs: Any) -> Any: + return pytree.tree_map_only(torch.Tensor, clone_input, inputs) + + +# Test util requirements +# - The test util must have signature (op: OpOverload, args, kwargs) +# - The test util must NOT mutate args, kwargs. +# - The test utils in this list must not be prefixes of each other. For example, +# having both "test_schema" and "test_schema_is_functional" is NOT OK. +# - The order of items in this dict matters (for opcheck), we'll run them +# in order. +ALL_TEST_UTILS = { + "test_schema": safe_schema_check, + "test_autograd_registration": safe_autograd_registration_check, + "test_faketensor": safe_fake_check, + "test_aot_dispatch_static": functools.partial( + safe_aot_autograd_check, + dynamic=False, + ), + "test_aot_dispatch_dynamic": functools.partial( + safe_aot_autograd_check, + dynamic=True, + ), +} + +GDOC = "https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit" + +DEFAULT_TEST_UTILS = [ + "test_schema", + "test_autograd_registration", + "test_faketensor", + "test_aot_dispatch_dynamic", +] + +DEPRECATED_DEFAULT_TEST_UTILS = DEFAULT_TEST_UTILS + [ + "test_aot_dispatch_static", +] + + +def generate_opcheck_tests( + testcase: Any, + namespaces: List[str], + failures_dict_path: Optional[str] = None, + additional_decorators: Dict[str, Callable] = None, + test_utils: List[str] = DEFAULT_TEST_UTILS, +) -> None: + """Given an existing TestCase, use the existing tests to generate + additional validation tests for custom operators. + + For {all existing tests in the TestCase} x {all test utils}, + we will generate one new test. The new test runs a TorchFunctionMode + that intercepts ``op(*args, **kwargs)`` calls and invokes + ``test_util(op, args, kwargs)``, where ``op`` is an operator. + + The test_utils that we support are in ALL_TEST_UTILS. They are: + - test_schema: This runs SchemaCheckMode. + - test_autograd_registration: This runs autograd_registration_check. + - test_faketensor: This runs CrossRefFakeMode. + - test_aot_dispatch_static: This runs aot_autograd_check, which: + checks that the outputs (and gradients, if they are computable) + are the same under eager-mode PyTorch and using AOTAutograd. + - test_aot_dispatch_dynamic: Same as test_aot_dispatch_static, but + runs AOTAutograd using dynamic shapes instead of static shapes.
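+ + Note that ``test_aot_dispatch_static`` is not included in DEFAULT_TEST_UTILS; + it is only kept in DEPRECATED_DEFAULT_TEST_UTILS.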
+ + The generated test will have name ``{test_util}__{original_name}``. + For example, if there is a method named ``test_cumsum``, then + we will generate a ``test_schema__test_cumsum``, + ``test_faketensor__test_cumsum``, etc. + + For more details, see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit + + Args: + testcase: The testcase we will modify and generate additional tests for. + namespaces: We will only intercept calls to custom operators with these + namespaces. + failures_dict_path: See ``validate_failures_dict_structure`` for more details + test_utils: a list of test_utils to generate. Example: ["test_schema", "test_faketensor"] + """ + if additional_decorators is None: + additional_decorators = {} + test_methods = [ + m + for m in dir(testcase) + if m.startswith("test_") and callable(getattr(testcase, m)) + ] + if failures_dict_path is None: + # The default failures_dict_path is failures_dict.json in + # the same directory as the test file. + prev_frame = inspect.currentframe().f_back + filename = inspect.getframeinfo(prev_frame)[0] + failures_dict_path = get_file_path_2( + os.path.dirname(filename), "failures_dict.json" + ) + failures_dict = FailuresDict.load( + failures_dict_path, create_file=should_update_failures_dict() + ) + validate_failures_dict_structure(failures_dict, test_utils, testcase) + validate_failures_dict_formatting(failures_dict_path) + + def construct_method(attr, prefix, tester): + method = getattr(testcase, attr) + if getattr(method, "_torch_dont_generate_opcheck_tests", False): + return + new_method_name = prefix + "__" + attr + + @functools.wraps(method) + def new_method(*args, **kwargs): + with OpCheckMode( + namespaces, + prefix, + tester, + failures_dict, + f"{testcase.__name__}.{new_method_name}", + failures_dict_path, + ): + result = method(*args, **kwargs) + return result + + if pytestmark := new_method.__dict__.get("pytestmark"): + import pytest + + # check if we need to simplify the parametrize marks + # NB: you need to add this mark to your pytest.ini + opcheck_only_one = False + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "opcheck_only_one": + opcheck_only_one = True + + if opcheck_only_one: + new_pytestmark = [] + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "parametrize": + argnames, argvalues = mark.args + assert not mark.kwargs, "NYI" + # Special case for device: we want to run on all + # devices + if argnames != "device": + new_pytestmark.append( + pytest.mark.parametrize( + argnames, (next(iter(argvalues)),) + ) + ) + continue + new_pytestmark.append(mark) + new_method.__dict__["pytestmark"] = new_pytestmark + + if new_method_name in additional_decorators: + for dec in additional_decorators[new_method_name]: + new_method = dec(new_method) + + if hasattr(testcase, new_method_name): + raise RuntimeError( + f"Tried to autogenerate {new_method_name} but {testcase} already " + f"has a method named {new_method_name}. Please rename the original " + f"method on the TestCase."
+ ) + setattr(testcase, new_method_name, new_method) + + test_utils = {name: ALL_TEST_UTILS[name] for name in test_utils} + for attr in test_methods: + for prefix, tester in test_utils.items(): + construct_method(attr, prefix, tester) + + generate_tag_tests(testcase, failures_dict, additional_decorators) + + +def generate_tag_tests(testcase, failures_dict, additional_decorators): + def generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests): + def inner(self): + try: + op = torch._library.utils.lookup_op(qualname) + except AttributeError as e: + # Operator not importable in this test file + raise unittest.SkipTest(f"Can't import operator {qualname}") from e + op_marked_as_compliant = torch.Tag.pt2_compliant_tag in op.tags + if not op_marked_as_compliant: + return + if not definitely_not_pt2_compliant: + return + raise AssertionError( + f"op '{qualname}' was tagged with torch.Tag.pt2_compliant_tag " + f"but it failed some of the generated opcheck tests " + f"({xfailed_tests}). This may lead to silent correctness issues; " + f"please fix this." + ) + + return inner + + for qualname, test_dict in failures_dict.data.items(): + xfailed_tests = [ + test + for test, status_dict in test_dict.items() + # We're about to delete the following test after Ed's PR + # to specialize on C++ .size() calls + if "test_aot_dispatch_static" not in test + and status_dict["status"] == "xfail" + ] + definitely_not_pt2_compliant = len(xfailed_tests) > 0 + generated = generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests) + + # Could result in collisions, but unlikely. We'll raise if we see one below. + mangled_qualname = qualname.replace("::", "_").replace(".", "_") + test_name = "test_pt2_compliant_tag_" + mangled_qualname + + # You can skip this test via the additional_decorators argument + # in generate_opcheck_tests + if test_name in additional_decorators: + for decorator in additional_decorators[test_name]: + generated = decorator(generated) + + if hasattr(testcase, test_name): + raise RuntimeError( + f"Tried to generate a test named {test_name}, but it exists " + f"already. This could be because of a name collision (where " + f"we generated two tests with the same name), or where we " + f"generated a test with the same name as an existing test." + ) + setattr(testcase, test_name, generated) + + +TEST_OPTIONS = ("xfail", "skip", "xsuccess") + + +def validate_failures_dict_formatting(failures_dict_path: str) -> None: + with open(failures_dict_path) as fp: + actual = fp.read() + failures_dict = FailuresDict.load(failures_dict_path) + expected = failures_dict._save(to_str=True) + if actual == expected: + return + if should_update_failures_dict(): + failures_dict = FailuresDict.load(failures_dict_path) + failures_dict.save() + return + expected = expected.splitlines(True) + actual = actual.splitlines(True) + diff = difflib.unified_diff(actual, expected) + diff = "".join(diff) + raise RuntimeError( + f"\n{diff}\n\nExpected the failures dict to be formatted " + f"a certain way. Please see the above diff; you can correct " + f"this either manually or by re-running the test with " + f"PYTORCH_OPCHECK_ACCEPT=1" + ) + + +def validate_failures_dict_structure( + failure_dict: "FailuresDict", test_utils: List[str], testcase: Any +) -> None: + """Validates the failures dict. + + The failure dict looks something like the following. + It maps operator name (qualname) to a dict of autogenerated tests.
+ Each autogenerated test may have a check for the operator (if the operator is + called by the test); the dictionary specifies if we should skip the check, + or if we expect some check to fail. + + { + "fbgemm::split_lengths": { + "test_schema__test_split_lengths": { + "comment": "you can put whatever you want into the comment section", + "status": "xfail", + }, + "test_schema__test_split_lengths_empty": { + "comment": "", + "status": "skip", + }, + }, + "fbgemm::gather_lengths": { + "test_schema__test_gather_lengths": { + "comment": "", + "status": "skip", + }, + }, + } + + """ + failure_dict = failure_dict.data + qualnames = list(failure_dict.keys()) + for test_to_option in failure_dict.values(): + test_names = list(test_to_option.keys()) + for test_name, test_dict in test_to_option.items(): + if set(test_dict.keys()) != {"comment", "status"}: + raise RuntimeError( + "in failures_dict, expected sub-dict to have keys 'comment' and 'status'" + ) + test_option = test_dict["status"] + if test_option not in TEST_OPTIONS: + raise RuntimeError( + f"In failures_dict, got status={test_option} but it needs to be in {TEST_OPTIONS}" + ) + test_class, actual_test_name = test_name.split(".") + if not any(actual_test_name.startswith(test) for test in test_utils): + raise RuntimeError( + f"In failures_dict, test name '{test_name}' should begin with one of {test_utils}" + ) + for test in test_utils: + if not actual_test_name.startswith(test): + continue + base_test_name = actual_test_name[len(test) + 2 :] + # remove potential pytest parametrization suffix + base_test_name = re.sub(r"\[.*\]", "", base_test_name) + if testcase.__name__ != test_class: + continue + if hasattr(testcase, base_test_name): + continue + raise RuntimeError( + f"In failures dict, got test name '{test_name}'. We parsed this as " + f"running test '{test}' on '{base_test_name}', but " + f"{base_test_name} does not exist on the TestCase '{testcase.__name__}'. " + f"Maybe you need to change the test name?" + ) + + +def should_update_failures_dict() -> bool: + key = "PYTORCH_OPCHECK_ACCEPT" + return key in os.environ and os.environ[key] == "1" + + +_is_inside_opcheck_mode = threading.local() +_is_inside_opcheck_mode.value = False + + +def is_inside_opcheck_mode(): + return _is_inside_opcheck_mode.value + + +class OpCheckMode(TorchFunctionMode): + """ + For a given test, OpCheckMode intercepts calls to operators and runs + test_util(op, args, kwargs) for each intercepted (op, args, kwargs). + """ + + def __init__( + self, + namespaces: List[str], + test_util_name: str, + test_util: Callable, + failures_dict: "FailuresDict", + test_name: str, + failures_dict_path: str, + ): + # We will intercept calls to ops with these namespaces + self.namespaces = namespaces + # The test utility function. Its signature should be (op, args, kwargs) -> None. + # Examples of test utilities are: schema_check, make_fx_check + self.test_util = test_util + self.test_util_name = test_util_name + # The name of the test that is running this OpCheckMode. + self.test_name = test_name + # Maps qualname -> test_name -> skip/xfail + # Tells us if we should skip a test or assert that there is a failure. + self.failures_dict = failures_dict + # Location of the failures dict. Makes it so that the error message is better. + self.failures_dict_path = failures_dict_path + + # OpCheckMode suppresses errors, collects them here, and then raises them on exit.
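+ # Collecting errors here rather than raising immediately lets a single + # test report every failing operator at once (see maybe_raise_errors_on_exit).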
+ # Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)] + self.seen_ops_to_errors = {} + + def maybe_raise_errors_on_exit(self) -> None: + # Check expected failures first + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if len(self.seen_ops_to_errors[qualname]) == 0: + if should_update_failures_dict(): + self.failures_dict.set_status( + qualname, self.test_name, "xsuccess", comment="" + ) + else: + if option == "xfail": + raise OpCheckError( + f"generate_opcheck_tests: Unexpected success for operator " + f"{qualname} on test {self.test_name}. This may mean that " + f"you have fixed this test failure. Please rerun the test with " + f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the failures dict " + f"or manually remove the " + f"expected failure in the failure dict at " + f"{self.failures_dict_path}. " + f"For more details, see " + f"{GDOC}" + ) + continue + failed_ops = [] + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if option != "xsuccess": + continue + if len(self.seen_ops_to_errors[qualname]) == 0: + continue + failed_ops.append(qualname) + if not failed_ops: + return + + if should_update_failures_dict(): + for op in failed_ops: + self.failures_dict.set_status(op, self.test_name, "xfail") + return + + # Raise from the first error but also report about all of them to make + # recording xfails easier. + ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0] + repro_command = generate_repro( + self.test_util_name, op, args, kwargs, save_data=should_print_better_repro() + ) + raise OpCheckError( + f"Test generated by `generate_opcheck_tests`, {self.test_name}, " + f"failed on operators {failed_ops}. This usually means that the " + f"operators are not implemented correctly and may lead to silently " + f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, " + f"or please see " + f"{GDOC} " + f"for more recommendations. " + f"To reproduce this problem locally, try to run the following:\n{repro_command}" + ) from ex + + def __enter__(self, *args, **kwargs): + self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value + self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "") + _is_inside_opcheck_mode.value = True + os.environ["TORCHDYNAMO_DISABLE"] = "1" + return super().__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + _is_inside_opcheck_mode.value = self.prev_is_opcheck_mode + os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable + try: + self.maybe_raise_errors_on_exit() + if should_update_failures_dict(): + self.failures_dict.save() + finally: + result = super().__exit__(*args, **kwargs) + return result + + def run_test_util(self, op, args, kwargs): + try: + self.test_util(op, args, kwargs, copy_inputs=False) + except torch._subclasses.fake_tensor.UnsupportedFakeTensorException: + # We might get here if the input is already a FakeTensor + # or if we're in a torch.compile block. Just ignore these + # since we can't handle them and reporting them as failures + # is too noisy.
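+ # (Any other exception propagates to __torch_function__, which records it + # in seen_ops_to_errors so it can be raised when the mode exits.)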
+ pass + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + + # Only intercept calls to operators + if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)): + return func(*args, **kwargs) + if ( + torch.jit.is_tracing() + or torch.jit.is_scripting() + or torch._dynamo.is_compiling() + ): + return func(*args, **kwargs) + # Pre-existing code may not use the .default overload. If we see an + # OpOverloadPacket and we cannot resolve the overload, then we just throw + # and ask the user to clarify. Otherwise, we attempt to resolve the overload. + if isinstance(func, torch._ops.OpOverloadPacket): + func = resolve_unique_overload_or_throw(func) + qualname = func.name() + ns = qualname.split("::")[0] + if ns not in self.namespaces: + return func(*args, **kwargs) + + args_c, kwargs_c = deepcopy_tensors((args, kwargs)) + result = func(*args, **kwargs) + + option = self.failures_dict.get_status(qualname, self.test_name) + if option == "xsuccess" or option == "xfail": + # Suppress all errors during execution. Raise them during __exit__. + try: + if qualname not in self.seen_ops_to_errors: + self.seen_ops_to_errors[qualname] = [] + self.run_test_util(func, args_c, kwargs_c) + except Exception as ex: + if should_print_better_repro(): + self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs)) + else: + self.seen_ops_to_errors[qualname].append((ex, func, None, None)) + elif option == "skip": + pass + return result + + +def should_print_better_repro() -> bool: + """If set, the tests generated by `generate_opcheck_tests` will print a + repro command on failure. + + In order to print the repro command, we need to save some tensors to disk. + These will be saved under the following directory: + {tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/. + + Although this is a temp folder, it will usually not automatically get cleaned + up, so you'll need to manually delete it. + """ + key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO" + if key not in os.environ: + return False + return os.environ[key] == "1" + + +def opcheck( + op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef], + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + test_utils: Union[str, Sequence[str]] = DEFAULT_TEST_UTILS, + raise_exception: bool = True, +) -> Dict[str, str]: + """See torch.library.opcheck for docstring""" + + if kwargs is None: + kwargs = {} + if isinstance(op, CustomOpDef): + op = op._opoverload + if isinstance(op, torch._ops.OpOverloadPacket): + op = resolve_unique_overload_or_throw(op) + if not isinstance(op, torch._ops.OpOverload): + raise ValueError( + f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, " + f"e.g.
torch.ops.aten.sin.default, got {type(op)}" + ) + if test_utils == "ALL": + test_utils = tuple(ALL_TEST_UTILS.keys()) + if isinstance(test_utils, str): + test_utils = (test_utils,) + if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset( + ALL_TEST_UTILS.keys() + ): + raise ValueError( + f"opcheck(op, ..., test_utils={test_utils}), expected test_utils " + f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not" + ) + + results_dict = {} + for test_util in test_utils: + tester = ALL_TEST_UTILS[test_util] + try: + tester(op, args, kwargs) + results_dict[test_util] = "SUCCESS" + except Exception as ex: + if raise_exception: + raise OpCheckError( + f"opcheck(op, ...): {test_util} failed with {ex} " + f"(scroll up for stack trace)" + ) from ex + results_dict[test_util] = ex + return results_dict + + +class OpCheckError(Exception): + pass + + +def generate_repro( + test: str, + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + save_data: bool, + dry_run: bool = False, +) -> str: + if save_data: + now = datetime.datetime.now() + path = os.path.join(tempfile.gettempdir(), "pytorch_opcheck_safe_to_delete") + unix_timestamp = datetime.datetime.timestamp(now) * 100000 + filepath = os.path.join(path, f"repro_{unix_timestamp}.pt") + if not dry_run: + os.makedirs(path, exist_ok=True) + torch.save((args, kwargs), filepath) + args_kwargs = f'args, kwargs = torch.load("{filepath}")' + else: + args_kwargs = ( + "# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n" + "# we will fill in the same (args, kwargs) as in your test\n" + "args = () # args to the operator\n" + "kwargs = {} # kwargs to the operator" + ) + + ns, name = op._schema.name.split("::") + overload = op._overloadname + + repro_command = ( + f"# =========================================================\n" + f"# BEGIN REPRO SCRIPT\n" + f"# =========================================================\n" + f"import torch\n" + f"from torch.testing._internal.optests import opcheck\n" + f"\n" + f"# Make sure you have loaded the library that contains the op\n" + f"# via an import or torch.ops.load_library(...)\n" + f"op = torch.ops.{ns}.{name}.{overload}\n" + f"\n" + f"{args_kwargs}\n" + f'opcheck(op, args, kwargs, test_utils="{test}")\n' + f"# =========================================================\n" + f"# END REPRO SCRIPT\n" + f"# =========================================================\n" + ) + return repro_command + + +def resolve_unique_overload_or_throw( + op: torch._ops.OpOverloadPacket, +) -> torch._ops.OpOverload: + all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name) + if len(all_schemas) != 1: + raise RuntimeError( + f"opcheck can only test operators without overloads. " + f"Got the following overloads for {op._qualified_op_name}: " + f"{[schema.overload_name for schema in all_schemas]}" + ) + + overload_name = all_schemas[0].overload_name + if overload_name == "": + return op.default + return getattr(op, overload_name) + + +DUMP_OPTIONS = {"indent": 2, "sort_keys": True} + + +FailuresDictData = Dict[str, Dict[str, Dict[str, str]]] + + +VERSION = 1 +DESCRIPTION = ( + f"This is a dict containing failures for tests autogenerated by " + f"generate_opcheck_tests.
" + f"For more details, please see {GDOC}" +) + + +class FailuresDict: + def __init__(self, path: str, data: FailuresDictData): + self.path = path + self.data = data + + @staticmethod + def load(path, *, create_file=False) -> "FailuresDict": + if create_file and not os.path.exists(path): + result = FailuresDict(path, {}) + FailuresDict.save() + return result + with open(path) as fp: + contents = fp.read() + if contents.strip() == "": + dct = { + "_description": DESCRIPTION, + "data": {}, + "_version": VERSION, + } + else: + dct = json.loads(contents) + assert "data" in dct + assert "_version" in dct and dct["_version"] == VERSION + return FailuresDict(path, dct["data"]) + + def _save(self, to_str=False) -> Optional[str]: + to_dump = { + "_description": DESCRIPTION, + "data": self.data, + "_version": VERSION, + } + # json.dumps doesn't end with a newline. Let's add one because files + # should end in newlines. + serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n" + if to_str: + return serialized + with open(self.path, "w") as fp: + fp.write(serialized) + return None + + def save(self) -> None: + return self._save() + + def get_status(self, qualname: str, test_name: str) -> str: + if qualname not in self.data: + return "xsuccess" + dct = self.data[qualname] + if test_name not in dct: + return "xsuccess" + return dct[test_name]["status"] + + def set_status( + self, + qualname: str, + test_name: str, + status: str, + *, + comment: Optional[str] = None, + ): + if qualname not in self.data: + self.data[qualname] = {} + dct = self.data[qualname] + if test_name not in dct: + dct[test_name] = {"status": None, "comment": ""} + + if status == "xsuccess": + # The default status is "xsuccess". + del dct[test_name] + else: + dct[test_name]["status"] = status + if comment is not None: + dct[test_name]["comment"] = comment diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..83cefd18bc059a9667c4d224fab360d6d41cfe34 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py @@ -0,0 +1,89 @@ +# mypy: ignore-errors + +import torch +from torch.fx.experimental.proxy_tensor import make_fx +from torch.testing._utils import wrapper_set_seed +import torch.utils._pytree as pytree + + +def make_fx_check( + func, + args, + kwargs, + tracing_mode, + assert_close=torch.testing.assert_close, + randomize_data=False, +): + f, *new_args = handle_sizes_for_dynamic_shapes(func, args, kwargs) + + def run(f, *args, **kwargs): + return wrapper_set_seed(f, *args, **kwargs) + + traced_f = make_fx(f, tracing_mode=tracing_mode)(*new_args) + + msg = ( + "op(*args, **kwargs) and make_fx(op)(*args, **kwargs) produced different " + "values. This could mean that your abstract impls (meta/FakeTensor impls) " + "are incorrect, that your operator is not completely traceable (e.g., " + "it relies on some global state), or that there is a bug in make_fx. " + "Note that if you passed a python function (and not an operator) to " + "make_fx_check, it is still possible that the python function will still " + "work with torch.compile because it handles capturing pieces of " + "your python code to compile." + ) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. 
+ # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore the failure if we used + # random data. + if randomize_data: + new_args = randomize(new_args) + try: + expected = run(f, *new_args) + except Exception: + if randomize_data: + return + raise + result = run(traced_f, *new_args) + assert_close(result, expected, msg=msg) + + +# Arguably we should make make_fx promote torch.Size() objects to symbolic shapes. +# Absent that, here is our strategy: +# +# If any argument is a torch.Size(), maybe get dynamic shapes for it by: +# - Create a temporary Tensor whose size is the torch.Size() we want. Note that +# we use an expanded Tensor as we cannot pass "meta" Tensors to make_fx. +# - Pass it to make_fx such that it is converted to a proxy Tensor +# - Unpack the size in the wrapper to get a torch.Size with dynamic shapes (in +# symbolic mode, a no-op otherwise) +def handle_sizes_for_dynamic_shapes(func, args, kwargs): + def f(args, kwargs, extra_args, extra_kwargs): + if extra_args: + for i, t in extra_args: + args[i] = t.size() + if extra_kwargs: + for k, t in extra_kwargs.items(): + kwargs[k] = t.size() + + return func(*args, **kwargs) + + extra_args = [] + extra_kwargs = {} + for i, arg in enumerate(args): + if isinstance(arg, torch.Size): + extra_args.append((i, torch.empty(arg, device="cpu"))) + for key, value in kwargs.items(): + if isinstance(value, torch.Size): + extra_kwargs[key] = torch.empty(value, device="cpu") + + return f, args, kwargs, extra_args, extra_kwargs + + +def randomize(args): + def transform(x): + if not x.dtype.is_floating_point: + return x + return x.detach().clone().uniform_(0, 1).requires_grad_(x.requires_grad) + return pytree.tree_map_only(torch.Tensor, transform, args) diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__init__.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c708de83c830e0233e6abf1b66ed0a90a1a89fde Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..664fa8e035643e65f6aa649326539b2d6e31f5e1 Binary files /dev/null and b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/future_div.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecf63e7c20d2967c969eee1a83dc10dc6f7385e2 Binary files /dev/null and
b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/__pycache__/no_future_div.cpython-310.pyc differ diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py new file mode 100644 index 0000000000000000000000000000000000000000..0a3494f945fad36d84cb8056dcf722d6911f0af2 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/future_div.py @@ -0,0 +1,10 @@ +# mypy: ignore-errors + + + +def div_int_future(): + return 1 / 2 + + +def div_float_future(): + return 3.14 / 0.125 diff --git a/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py new file mode 100644 index 0000000000000000000000000000000000000000..164e6d168414a11039f3b63885760ad08b81ae99 --- /dev/null +++ b/infer_4_30_0/lib/python3.10/site-packages/torch/testing/_internal/test_module/no_future_div.py @@ -0,0 +1,11 @@ +# mypy: ignore-errors + +import torch # noqa: F401 + + +def div_int_nofuture(): + return 1 / 2 + + +def div_float_nofuture(): + return 3.14 / 0.125
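As a usage illustration for the opcheck utilities added in this diff, here is a minimal sketch of how opcheck() is typically driven. It is not part of the diff: the operator mylib::my_sin, its registration, and the chosen test_utils are assumptions for the example.

# Illustrative sketch only (not part of the vendored sources above).
# Assumes a custom op "mylib::my_sin" was registered elsewhere (e.g. via
# torch.library); the namespace and op name are hypothetical.
import torch
from torch.testing._internal.optests import opcheck

x = torch.randn(3, requires_grad=True)
results = opcheck(
    torch.ops.mylib.my_sin.default,  # an OpOverload; packets with a single overload are resolved automatically
    (x,),                            # positional args for the operator
    {},                              # kwargs for the operator
    test_utils=("test_schema", "test_faketensor"),
    raise_exception=False,           # record failures instead of raising OpCheckError
)
# results maps each test util to "SUCCESS" or to the exception it raised.
print(results)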