diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8de0067cec8116c0f4e5b16ad93fd37c6cb46420
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a40f8edfc1397152c931fc33f076f8f658621373
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ded01bb61b1398be9444ea94a5c6226fcd7fc0a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cslt_compress_ops.h>
+
+namespace at {
+
+
+// aten::_cslt_compress(Tensor input) -> Tensor
+inline at::Tensor _cslt_compress(const at::Tensor & input) {
+    return at::_ops::_cslt_compress::call(input);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8b0c830ea0ca01f9d918eb6541ba63b7f79532b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_init_dropout_state_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cudnn_init_dropout_state {
+  using schema = at::Tensor (double, bool, int64_t, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_init_dropout_state")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor")
+  static at::Tensor call(double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+};
+
+struct TORCH_API _cudnn_init_dropout_state_out {
+  using schema = at::Tensor & (double, bool, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_init_dropout_state")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(double dropout, bool train, int64_t dropout_seed, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..25d577c9e9c8f1ddc4d729e935c3923dad363ca4
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _debug_has_internal_overlap {
+  using schema = int64_t (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_debug_has_internal_overlap")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_debug_has_internal_overlap(Tensor self) -> int")
+  static int64_t call(const at::Tensor & self);
+  static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f64c4bbb3192b23d6b1538c29edacbd40d004f47
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_efficientzerotensor_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _efficientzerotensor(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+TORCH_API at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6bc1b277f36cd3ffda42b4e5d583e64b2f4a1232
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format=c10::MemoryFormat::Contiguous);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_outf(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format=c10::MemoryFormat::Contiguous);
+TORCH_API at::Tensor & _empty_per_channel_affine_quantized_symint_outf(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h
new file mode 100644
index 0000000000000000000000000000000000000000..b790fb66899d058b1f6d5c95104d00d71ca58d03
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_erf_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_erf(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_erf(at::TensorList self) {
+    return at::_ops::_foreach_erf::call(self);
+}
+
+// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
+inline void _foreach_erf_(at::TensorList self) {
+    return at::_ops::_foreach_erf_::call(self);
+}
+
+// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_erf_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_erf_out::call(self, out);
+}
+// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_erf_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_erf_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h
new file mode 100644
index 0000000000000000000000000000000000000000..4311a6986417c9fe39fc9addc09284be6acaa9d0
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_functional_assert_scalar_ops.h>
+
+namespace at {
+
+
+// aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor
+inline at::Tensor _functional_assert_scalar(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
+    return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1cb1e3b98461d927bf2bbde0318c752823e349e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _fw_primal(const at::Tensor & self, int64_t level);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f7c9ed1a692caaad53f4998ae533a8a2686e89e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_has_same_storage_numel_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API bool _has_same_storage_numel(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fda9572c3d229e49dc94c440cf196662f650104
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _is_any_true {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_is_any_true")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_is_any_true(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be9397ca319d0477f74324049492a08db5782674
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths={}, const ::std::optional<at::Tensor> & offsets={}, int64_t axis=0, const ::std::optional<at::Scalar> & initial=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f63249ba27d001ac1b2bb6143b8a9506680689e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sparse_csr_sum_dim_dtype_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor _sparse_csr_sum_cpu(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor _sparse_csr_sum_cuda(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..51ad69c01007f968798b6a9a394ae92c021d0283
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acosh.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acosh.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce02d1865c4fcdac31d0d01e98859e2fe3265060
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acosh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/acosh_ops.h>
+
+namespace at {
+
+
+// aten::acosh(Tensor self) -> Tensor
+inline at::Tensor acosh(const at::Tensor & self) {
+    return at::_ops::acosh::call(self);
+}
+
+// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & acosh_(at::Tensor & self) {
+    return at::_ops::acosh_::call(self);
+}
+
+// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::acosh_out::call(self, out);
+}
+// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::acosh_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1b9b1284d4cdb8ed7250b54795c5434db44098d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor align_as(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5062377415a33ba2d9a2bccbf10f104662415123
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h
@@ -0,0 +1,31 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor all(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor all(const at::Tensor & self);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e9f2fa48a701404bd22789361e971fdc354ddbd
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h
@@ -0,0 +1,69 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/as_strided_ops.h>
+
+namespace at {
+
+
+// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
+inline at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
+  }
+}
+
+// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
+inline at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided::call(self, size, stride, storage_offset);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided::call(self, size, stride, storage_offset);
+  }
+}
+
+// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
+inline const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<int64_t> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt);
+  }
+}
+
+// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
+inline const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided_::call(self, size, stride, storage_offset);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  const at::Tensor & as_strided_(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset=::std::nullopt) {
+    return at::_ops::as_strided_::call(self, size, stride, storage_offset);
+  }
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..909f8b432177a9b0a6fb36deb34d715e72276701
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API atan2_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API atan2_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API atan2 {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd188a1db15bd775d7ee4484da219c72babd7121
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bilinear {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bilinear")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d580df33865f7a34ac9a1048466bd04b19fd0083
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/bitwise_right_shift_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_bitwise_right_shift_out : public at::meta::structured_bitwise_right_shift_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24f873e339c639ea39f9091d4431e8871ec1ca47
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2);
+TORCH_API at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2);
+TORCH_API at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3a28ac29771b2911d291cb10bde7454c7b7a50d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & ccol_indices_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor ccol_indices_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/complex_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/complex_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..85947adb6e2a6ae1395d78efcd786ad35e894123
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/complex_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API complex {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::complex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "complex(Tensor real, Tensor imag) -> Tensor")
+  static at::Tensor call(const at::Tensor & real, const at::Tensor & imag);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag);
+};
+
+struct TORCH_API complex_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::complex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..549275fceee5fd8b179f30846aeaca13f3f451f9
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_copysign_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e9c94bf0f1d020988d599383a8df92b3ae444fa
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t dense_dim_default(const at::Tensor & self);
+TORCH_API int64_t dense_dim_sparse(const at::Tensor & self);
+TORCH_API int64_t dense_dim_sparse_csr(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..06db87aa42c9ca4b6e9ee1fc82d69d5ff392f378
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/detach_copy_ops.h>
+
+namespace at {
+
+
+// aten::detach_copy(Tensor self) -> Tensor
+inline at::Tensor detach_copy(const at::Tensor & self) {
+    return at::_ops::detach_copy::call(self);
+}
+
+// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & detach_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::detach_copy_out::call(self, out);
+}
+// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::detach_copy_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..495f2ab05680a1c0fb2b4cd003b284bb179ca3e8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & detach_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor detach_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..07495a5f2b2497d574269740413d740963190790
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_scatter_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API diagonal_scatter {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_scatter")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
+};
+
+struct TORCH_API diagonal_scatter_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_scatter")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h new file mode 100644 index 0000000000000000000000000000000000000000..b3158f8e0f6000a6f27188547f1c98ce099f9d58 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor +inline at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional & prepend={}, const ::std::optional & append={}) { + return at::_ops::diff::call(self, n, dim, prepend, append); +} + +// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional & prepend={}, const ::std::optional & append={}) { + return at::_ops::diff_out::call(self, n, dim, prepend, append, out); +} +// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional & prepend, const ::std::optional & append, at::Tensor & out) { + return at::_ops::diff_out::call(self, n, dim, prepend, append, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b885958f472fffff415cfc0ccc51b5b8cf4a2486 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API eye { + using schema = at::Tensor (c10::SymInt, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt n, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API eye_m { + using schema = at::Tensor (c10::SymInt, c10::SymInt, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "m") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt n, c10::SymInt m, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API eye_out { + using schema = at::Tensor & (c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out); +}; + +struct TORCH_API eye_m_out { + using schema = at::Tensor & (c10::SymInt, c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "m_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.m_out(SymInt n, SymInt m, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt n, c10::SymInt m, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h new file mode 100644 index 0000000000000000000000000000000000000000..65a4fed52b797d1456abe5dd7e028e46fe6ca369 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h new file mode 100644 index 0000000000000000000000000000000000000000..c757dc745e05e1a963fd235d6b2856c589d871b9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +inline at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); + } +} + +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +inline at::Tensor fft_fftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_fftn(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn::call(self, s, dim, norm); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_fftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_fftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bcc949dab627526156e9513e07d68b05fc9f1b74 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_floor_out : public at::meta::structured_floor { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor floor_sparse(const at::Tensor & self); +TORCH_API at::Tensor & floor_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_sparse_(at::Tensor & self); +TORCH_API at::Tensor floor_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & floor_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h new file mode 100644 index 0000000000000000000000000000000000000000..853f512320b0ae162b3a3b73e72e41d6449c4091 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
+// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor
+inline at::Tensor fused_moving_avg_obs_fake_quant(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
+    return at::_ops::fused_moving_avg_obs_fake_quant::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2eaf088f78d2f5ffc8ab1476fb0744f2a420d36b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API gelu_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
+};
+
+struct TORCH_API gelu_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gelu_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate);
+};
+
+}} // namespace at::_ops
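These _ops structs back the public wrappers in ATen/Functions.h. A hedged sketch of calling the corresponding wrapper, at::gelu_backward (assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void gelu_backward_demo() {
      at::Tensor x = at::randn({4});
      at::Tensor grad_out = at::ones_like(x);
      // d(gelu)/dx at x, scaled by grad_out; "tanh" selects the approximate variant
      at::Tensor grad_in = at::gelu_backward(grad_out, x, /*approximate=*/"tanh");
    }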
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b006e07f55c8c3dd95c6721f5c2f56514895b5ff
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_3d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API grid_sampler_3d {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::grid_sampler_3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+};
+
+struct TORCH_API grid_sampler_3d_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::grid_sampler_3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+};
+
+}} // namespace at::_ops
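A hedged sketch of the low-level op behind torch.nn.functional.grid_sample for 5-D inputs (assumes <ATen/ATen.h>; the integer mode codes follow the ATen convention, 0 = bilinear interpolation and 0 = zeros padding):

    #include <ATen/ATen.h>

    void grid_sampler_3d_demo() {
      at::Tensor input = at::randn({1, 1, 4, 4, 4});   // (N, C, D, H, W)
      at::Tensor grid  = at::zeros({1, 2, 2, 2, 3});   // (N, D_out, H_out, W_out, 3), coords in [-1, 1]
      at::Tensor out = at::grid_sampler_3d(input, grid, /*interpolation_mode=*/0,
                                           /*padding_mode=*/0, /*align_corners=*/false);
    }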
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7fd12cbd8c46f3186a4ee7af3712b0de287754b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h
@@ -0,0 +1,32 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_gt_Scalar : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Scalar & other);
+};
+struct TORCH_API structured_gt_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..2adb1b7959e5e9097e388c0ea27506b7ffd47677
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_hardsigmoid : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8e6e18b645ef9781acbf9d05c463c6a431d6c83
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API hardsigmoid_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API hardsigmoid {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API hardsigmoid_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+}} // namespace at::_ops
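The three structs above correspond to the out=, functional, and in-place spellings of the same op. A minimal sketch of all three through the public wrappers (assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void hardsigmoid_demo() {
      at::Tensor x = at::randn({4});
      at::Tensor y = at::hardsigmoid(x);    // functional
      at::Tensor out = at::empty_like(x);
      at::hardsigmoid_out(out, x);          // out= variant
      at::hardsigmoid_(x);                  // in-place variant
    }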
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..66b5eed6d99d6fe0b34fc7d1a24ed8d4cb1eb27e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API histc_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histc")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out);
+};
+
+struct TORCH_API histc {
+  using schema = at::Tensor (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histc")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max);
+};
+
+}} // namespace at::_ops
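A quick sketch of the histc wrapper on a floating-point input (assumes <ATen/ATen.h>; when min == max, the op falls back to the data's own range):

    #include <ATen/ATen.h>

    void histc_demo() {
      at::Tensor x = at::rand({1000});  // uniform samples in [0, 1)
      at::Tensor h = at::histc(x, /*bins=*/10, /*min=*/0, /*max=*/1);
    }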
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..519b18dc7c7f6d15a70bbc1203402f0481044d18
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API is_inference {
+  using schema = bool (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_inference")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_inference(Tensor self) -> bool")
+  static bool call(const at::Tensor & self);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a88b448c4c8ddbc0676aa839e532d2016f13c1d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool is_neg(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
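A one-line sketch of the is_inference predicate (assumes <ATen/ATen.h>; it returns true only for inference tensors, e.g. those created under c10::InferenceMode):

    #include <ATen/ATen.h>

    void is_inference_demo() {
      at::Tensor x = at::ones({2});
      bool flag = at::is_inference(x);  // false for an ordinary tensor
    }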
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cfd3090e53a8d0cad5c4e8e8a8d390ba961f1b41
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/istft_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<int64_t> length=::std::nullopt, bool return_complex=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
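A hedged round-trip sketch for istft, using at::stft (declared later in this diff) to produce a complex spectrogram first (assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void istft_demo() {
      at::Tensor signal = at::randn({16000});
      at::Tensor window = at::hann_window(400);
      at::Tensor spec = at::stft(signal, /*n_fft=*/400, /*hop_length=*/160,
                                 /*win_length=*/400, window, /*center=*/true,
                                 /*pad_mode=*/"reflect", /*normalized=*/false,
                                 /*onesided=*/true, /*return_complex=*/true);
      at::Tensor rec = at::istft(spec, /*n_fft=*/400, /*hop_length=*/160,
                                 /*win_length=*/400, window, /*center=*/true);
    }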
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h
new file mode 100644
index 0000000000000000000000000000000000000000..7320d3e90145e1961c057db606771dc5f062acbd
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h
@@ -0,0 +1,79 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/kaiser_window_ops.h>
+
+namespace at {
+
+
+// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) {
+    return at::_ops::kaiser_window_out::call(window_length, out);
+}
+// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) {
+    return at::_ops::kaiser_window_out::call(window_length, out);
+}
+
+// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
+    return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
+}
+// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
+    return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
+}
+
+// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) {
+    return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
+}
+// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
+    return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
+}
+
+}
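A hedged sketch of the three factory overloads and one out= form above (assumes <ATen/ATen.h>; the no-argument overload uses the library defaults, periodic window and beta = 12.0):

    #include <ATen/ATen.h>

    void kaiser_window_demo() {
      at::Tensor w1 = at::kaiser_window(128);
      at::Tensor w2 = at::kaiser_window(128, /*periodic=*/false, /*beta=*/8.0,
                                        at::TensorOptions().dtype(at::kDouble));
      at::Tensor out = at::empty({128});
      at::kaiser_window_out(out, 128, /*periodic=*/true, /*beta=*/12.0);
    }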
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..309e0224b5f46cd2ea57d6591ad8fee0d8a3b620
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
+TORCH_API at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+TORCH_API at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
+TORCH_API at::Tensor & lerp_(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h
new file mode 100644
index 0000000000000000000000000000000000000000..980a1214670c303be32d9b30bd7f52477f15e7e0
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/linalg_cholesky_ops.h>
+
+namespace at {
+
+
+// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
+inline at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false) {
+    return at::_ops::linalg_cholesky::call(self, upper);
+}
+
+// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) {
+    return at::_ops::linalg_cholesky_out::call(self, upper, out);
+}
+// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) {
+    return at::_ops::linalg_cholesky_out::call(self, upper, out);
+}
+
+}
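A hedged sketch of linalg_cholesky on a symmetric positive-definite input (assumes <ATen/ATen.h>; the factorization throws for non-SPD inputs, which is why the example builds A·Aᵀ + 3I first):

    #include <ATen/ATen.h>

    void cholesky_demo() {
      at::Tensor a = at::randn({3, 3});
      at::Tensor spd = at::mm(a, a.t()) + at::eye(3) * 3.0;
      at::Tensor L = at::linalg_cholesky(spd);                 // lower-triangular factor (default)
      at::Tensor U = at::linalg_cholesky(spd, /*upper=*/true); // upper-triangular factor
    }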
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f985849d14f17f9ad59f04cb65dd5c22ba7d04c1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_ex_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..98ea0acf60156f01659b53c609e25bf3a096636a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/logaddexp_ops.h>
+
+namespace at {
+
+
+// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logaddexp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::logaddexp_out::call(self, other, out);
+}
+// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & logaddexp_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::logaddexp_out::call(self, other, out);
+}
+
+// aten::logaddexp(Tensor self, Tensor other) -> Tensor
+inline at::Tensor logaddexp(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::logaddexp::call(self, other);
+}
+
+}
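A minimal sketch of logaddexp, which computes log(exp(self) + exp(other)) without overflowing for large inputs (assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void logaddexp_demo() {
      at::Tensor log_p = at::log(at::rand({4}));
      at::Tensor log_q = at::log(at::rand({4}));
      at::Tensor log_sum = at::logaddexp(log_p, log_q);  // functional form
      at::Tensor out = at::empty_like(log_sum);
      at::logaddexp_out(out, log_p, log_q);              // out= form
    }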
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..72d39c3857c00141c3082534657b3e275cb15c7d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb106bc85639df0a72a06927a60a95d2d0990068
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
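A quick sketch of the max_pool3d composite op on a 5-D input (assumes <ATen/ATen.h>; an empty stride defaults to the kernel size, as in the Python API):

    #include <ATen/ATen.h>

    void max_pool3d_demo() {
      at::Tensor x = at::randn({1, 2, 8, 8, 8});                    // (N, C, D, H, W)
      at::Tensor y = at::max_pool3d(x, /*kernel_size=*/{2, 2, 2});  // -> (1, 2, 4, 4, 4)
    }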
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..17c978b9216d141e37778da5babe04ddd9f679af
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor mkldnn_adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & mkldnn_adaptive_avg_pool2d_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..71ca4c53dfbd59e3066353660fd6d56ddf39b7c3
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h
new file mode 100644
index 0000000000000000000000000000000000000000..144a982f1f602bb6bebe4a9e5175482f3be77a11
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/mkldnn_linear_backward_weights_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
+    return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
+}
+
+// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
+    return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1);
+}
+// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::mkldnn_linear_backward_weights_out::call(grad_output, input, weight, bias_defined, out0, out1);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..61244fc51a5b06a0f955b01f8da4d29f9d2d0095
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_max_pool3d {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+};
+
+struct TORCH_API mkldnn_max_pool3d_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight.h
new file mode 100644
index 0000000000000000000000000000000000000000..2df2dfb9927151167e3560bb3ecd1e9124c9878d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_reorder_conv3d_weight.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/mkldnn_reorder_conv3d_weight_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
+inline at::Tensor mkldnn_reorder_conv3d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor mkldnn_reorder_conv3d_weight(const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt);
+  }
+}
+
+// aten::mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
+inline at::Tensor mkldnn_reorder_conv3d_weight_symint(const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups, input_size);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor mkldnn_reorder_conv3d_weight(const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups, input_size);
+  }
+}
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1, at::OptionalIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt, out);
+  }
+}
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::OptionalIntArrayRef input_size, at::Tensor & out) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, input_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*input_size)) : ::std::nullopt, out);
+  }
+}
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_reorder_conv3d_weight_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & mkldnn_reorder_conv3d_weight_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1, at::OptionalSymIntArrayRef input_size=::std::nullopt) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
+  }
+}
+
+// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & mkldnn_reorder_conv3d_weight_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & mkldnn_reorder_conv3d_weight_outf(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size, at::Tensor & out) {
+    return at::_ops::mkldnn_reorder_conv3d_weight_out::call(self, padding, stride, dilation, groups, input_size, out);
+  }
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..185434c66554c7fa8089ff02b69c066a9d3a2af6
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_rnn_layer_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace);
+};
+
+struct TORCH_API mkldnn_rnn_layer_backward_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fee6727424a90532d2288f51c35b8d7ec01f47a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..73f6fcfe4724d4231bea91ae272bac572f79bd33
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/mse_loss_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_mse_loss_out : public at::meta::structured_mse_loss {
+void impl(const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ceadde57989a6877f2836ee78231d050a0f7d8e9
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API nll_loss2d_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & out);
+};
+
+struct TORCH_API nll_loss2d {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index);
+};
+
+}} // namespace at::_ops
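A hedged sketch of nll_loss2d over per-pixel class targets (assumes <ATen/ATen.h>; the op expects log-probabilities, hence the log_softmax step):

    #include <ATen/ATen.h>

    void nll_loss2d_demo() {
      at::Tensor logits = at::randn({2, 3, 4, 4});                  // (N, C, H, W)
      at::Tensor log_probs = at::log_softmax(logits, /*dim=*/1);
      at::Tensor target = at::randint(0, 3, {2, 4, 4}, at::kLong);  // class index per pixel
      at::Tensor loss = at::nll_loss2d(log_probs, target);          // weight=None, reduction=Mean
    }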
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b408416c4d73e00d093a0b3684e8a6a05ce9219c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API pad_sequence {
+  using schema = at::Tensor (at::TensorList, bool, double, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pad_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side=\"right\") -> Tensor")
+  static at::Tensor call(at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first, double padding_value, c10::string_view padding_side);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h
new file mode 100644
index 0000000000000000000000000000000000000000..99d796f91b5af700b19e1e4173909475f024b541
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/special_airy_ai_ops.h>
+
+namespace at {
+
+
+// aten::special_airy_ai(Tensor x) -> Tensor
+inline at::Tensor special_airy_ai(const at::Tensor & x) {
+    return at::_ops::special_airy_ai::call(x);
+}
+
+// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_airy_ai_out(at::Tensor & out, const at::Tensor & x) {
+    return at::_ops::special_airy_ai_out::call(x, out);
+}
+// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
+    return at::_ops::special_airy_ai_out::call(x, out);
+}
+
+}
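A short sketch of the Airy special function declared above, in both functional and out= forms (assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void airy_ai_demo() {
      at::Tensor x = at::linspace(-5.0, 5.0, /*steps=*/11);
      at::Tensor ai = at::special_airy_ai(x);   // Ai(x), evaluated elementwise
      at::Tensor out = at::empty_like(x);
      at::special_airy_ai_out(out, x);          // out= variant
    }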
+inline at::Tensor & special_airy_ai_outf(const at::Tensor & x, at::Tensor & out) {
+    return at::_ops::special_airy_ai_out::call(x, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..34e28d9e1a1dfa3bf6fe2b5e7d7f16777f75c78c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_digamma(const at::Tensor & self);
+TORCH_API at::Tensor & special_digamma_out(const at::Tensor & self, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f43603eed27900b4c285d2789fe0073f63d88c9
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_xlog1py : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..42ccabac08f7b4c28ed607bbaa8901852d547c3d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sqrt_out : public at::meta::structured_sqrt {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor sqrt_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & sqrt_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sqrt_sparse_(at::Tensor & self);
+TORCH_API at::Tensor sqrt_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & sqrt_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sqrt_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00ca6dfc42be8d1c47723a96f188a37f2a0d3d41
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor squeeze(const at::Tensor & self, at::Dimname dim);
+TORCH_API at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/stft.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/stft.h
new file mode 100644
index 0000000000000000000000000000000000000000..694eb0fda6980ef378a4fe6b39ef337d8546339f
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/stft.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+inline at::Tensor stft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length, ::std::optional<int64_t> win_length, const ::std::optional<at::Tensor> & window, bool normalized, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<bool> return_complex=::std::nullopt) {
+    return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
+}
+
+// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+inline at::Tensor stft(const at::Tensor & self, int64_t n_fft, ::std::optional<int64_t> hop_length=::std::nullopt, ::std::optional<int64_t> win_length=::std::nullopt, const ::std::optional<at::Tensor> & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, ::std::optional<bool> onesided=::std::nullopt, ::std::optional<bool> return_complex=::std::nullopt) {
+    return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1313cbbe50466fcdf123d3a6232c3a901cc78342
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
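+//
+// [Editor's note, not part of the generated header] A minimal usage sketch for
+// the operator declared below, assuming the Tensor-method binding that torchgen
+// emits alongside this schema (Tensor::to_sparse_csc); hypothetical, hedged:
+//
+//   at::Tensor dense = at::randn({4, 4});
+//   // dense_dim=::std::nullopt converts all dimensions to the sparse layout
+//   at::Tensor csc = dense.to_sparse_csc(::std::nullopt);
+//   TORCH_CHECK(csc.layout() == at::kSparseCsc);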
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API to_sparse_csc {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_sparse_csc")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, ::std::optional<int64_t> dense_dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dense_dim);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fbfeb0c4047681ade10cf61c1408749b6fdece2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..287d5d336a5a40a60bb1cf54a0174cd2c3296b39
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
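+//
+// [Editor's note, not part of the generated header] Hedged sketch of what this
+// dispatch header is for: the at::meta::triu entry points below run shape and
+// dtype inference only, so dispatching triu on a "meta"-device tensor yields a
+// correctly sized result with no storage behind it:
+//
+//   at::Tensor m = at::empty({5, 5}, at::TensorOptions().device(at::kMeta));
+//   at::Tensor r = at::triu(m, /*diagonal=*/1);  // r.sizes() == {5, 5}, no data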
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace meta
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9232b313ef51ad00e0dfe56d4fab1f94ab2ccb9a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_bicubic2d_backward_out_cpu : public at::meta::structured_upsample_bicubic2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_bicubic2d_backward_out_cuda : public at::meta::structured_upsample_bicubic2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..585e8a0bf63a6d646ce80c0ad35c797101a1c452
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_bilinear2d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, ::std::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_bilinear2d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, ::std::optional<double>, ::std::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API upsample_bilinear2d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, ::std::optional<double>, ::std::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..616a4d16c5c105c73a9342d049c768e27a549f54
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
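+//
+// [Editor's note, not part of the generated header] Hedged usage sketch:
+// view_as_real_copy is the non-aliasing counterpart of view_as_real; it
+// materializes a complex tensor as a real tensor with a trailing dimension
+// of size 2:
+//
+//   at::Tensor c = at::randn({3}, at::kComplexFloat);
+//   at::Tensor r = at::view_as_real_copy(c);  // r.sizes() == {3, 2}, owns its data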
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API view_as_real_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API view_as_real_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f47c36a91f2d70d6fc347e2b9940a4ab754202a7
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
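+
+// [Editor's note, not part of the generated header] Hedged usage sketch for
+// the at::cpu::where declarations above. The public entry point at::where
+// routes here for CPU tensors and computes "out = condition ? self : other"
+// elementwise, with broadcasting:
+//
+//   at::Tensor cond = at::rand({2, 2}).gt(0.5);  // boolean mask
+//   at::Tensor out = at::where(cond, at::ones({2, 2}), at::zeros({2, 2}));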