diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Half_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Half_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..132bd5e78096e4554637fac3d224db9cc8543ce2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Half_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Half {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Half")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Half(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..79b6de758fd74a0d5c2b9a5fe4c4cd9360c4882c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _conj_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _conj_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ebc1be64f5de8027d5dac1531237890bb423415b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f05512971df68029c6819e0c7aa2063b1691882
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _dirichlet_grad_out(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out);
+TORCH_API at::Tensor _dirichlet_grad_cpu(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
+TORCH_API at::Tensor _dirichlet_grad_cuda(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8e05f14356e85bd1cc2da6bc3c3a2a66847d9cb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add.h
@@ -0,0 +1,101 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_foreach_add_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_add_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_add_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_add__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_List::call(self, other, alpha);
+}
+
+// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
+inline void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add__List::call(self, other, alpha);
+}
+
+// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_add_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_add_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_add__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_add(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_Tensor::call(self, other, alpha);
+}
+
+// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()
+inline void _foreach_add_(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add__Tensor::call(self, other, alpha);
+}
+
+// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
+}
+// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
+    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
+}
+
+// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
+}
+
+// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
+}
+// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
+    return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_atan.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_atan.h
new file mode 100644
index 0000000000000000000000000000000000000000..34e9132247e0864a678d161fab633a458b0d4b0e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_atan.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_foreach_atan_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_atan(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_atan(at::TensorList self) {
+    return at::_ops::_foreach_atan::call(self);
+}
+
+// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
+inline void _foreach_atan_(at::TensorList self) {
+    return at::_ops::_foreach_atan_::call(self);
+}
+
+// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_atan_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_atan_out::call(self, out);
+}
+// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_atan_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_atan_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7e68f60b1dce3c014f14c3b0806cd6cb7c4cd42
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _fused_adam_ {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()")
+  static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+};
+
+struct TORCH_API _fused_adam__tensor_lr {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()")
+  static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+};
+
+struct TORCH_API _fused_adam_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
+};
+
+struct TORCH_API _fused_adam {
+  using schema = ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, double, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)")
+  static ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+  static ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+};
+
+struct TORCH_API _fused_adam_tensor_lr_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out);
+};
+
+struct TORCH_API _fused_adam_tensor_lr {
+  using schema = ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> (at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, at::TensorList, const at::Tensor &, double, double, double, double, bool, bool, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_adam")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_lr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)")
+  static ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> call(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+  static ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7692f49fe87c959c5eb1b1c4f814e647662c77ea
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fused_moving_avg_obs_fake_quant_cpu(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fused_moving_avg_obs_fake_quant_cuda(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de07de4ddd2228bca220ff6229768c29a2ddec52
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional<int64_t> dim=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_mkldnn_transpose_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_mkldnn_transpose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..662ef11500bbbd9c791bd097c49795858850b53d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_mkldnn_transpose_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _mkldnn_transpose_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1);
+TORCH_API at::Tensor & mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pad_circular.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pad_circular.h
new file mode 100644
index 0000000000000000000000000000000000000000..bcc638e121730e312fb2f5b3dd4d174b2b00e383
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pad_circular.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_pad_circular_ops.h>
+
+namespace at {
+
+
+// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
+inline at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _pad_circular(const at::Tensor & self, at::IntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, c10::fromIntArrayRefSlow(pad));
+  }
+}
+
+// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
+inline at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, pad);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _pad_circular(const at::Tensor & self, c10::SymIntArrayRef pad) {
+    return at::_ops::_pad_circular::call(self, pad);
+  }
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c800e80f9f285f917babacbc1932fa999131179e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor slow_conv2d_forward_cpu(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor & slow_conv2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+TORCH_API at::Tensor slow_conv2d_forward_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor & slow_conv2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f97fd8346a7db1209c56eb6cca777c18e399f04b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1605dd4d03df7fcd7ebe7d678b305fb12adcafd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _test_optional_floatlist(const at::Tensor & values, ::std::optional<at::ArrayRef<double>> addends);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0579a1fdbf5260c85a74eb7adf70fbd17b3304f4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..40f9394addfc35d1b57565072b10a2382bfe103d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim=::std::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e90e91a3e1ca8f7c9398c663c497a1b046e226d7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & arange_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step);
+TORCH_API at::Tensor & arange_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argwhere.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argwhere.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d72225712574f0655e174b733ea125d5be80db7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argwhere.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/argwhere_ops.h>
+
+namespace at {
+
+
+// aten::argwhere(Tensor self) -> Tensor
+inline at::Tensor argwhere(const at::Tensor & self) {
+    return at::_ops::argwhere::call(self);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..44e3ec7c5c7848bdc2e93db75911380a1a65c102
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor asinh(const at::Tensor & self);
+TORCH_API at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asinh_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c87507efece85d0baa1fb5d2362546f679cf676f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & batch_norm_backward_elemt_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count, at::Tensor & out);
+TORCH_API at::Tensor batch_norm_backward_elemt_cuda(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const ::std::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bdec6e3d71fd5efc00da8086432bdda3ba960679
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,29 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_or(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/can_cast.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/can_cast.h
new file mode 100644
index 0000000000000000000000000000000000000000..e387e4e0ceabacb131ca5134a2cbb645da7cc191
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/can_cast.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/can_cast_ops.h>
+
+namespace at {
+
+
+// aten::can_cast(ScalarType from_, ScalarType to) -> bool
+inline bool can_cast(at::ScalarType from_, at::ScalarType to) {
+    return at::_ops::can_cast::call(from_, to);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..63b67a9c371ec090619e4ab8970d924a784b140e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim=0);
+TORCH_API at::Tensor & cat_out(at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim=0);
+TORCH_API at::Tensor & cat_outf(const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ccol_indices_copy_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ccol_indices_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7146b858558df2970d88fcf68de6b6a091e74d64
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ccol_indices_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ccol_indices_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ccol_indices_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ccol_indices_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API ccol_indices_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ccol_indices_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chalf_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chalf_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b42f1dc609ed4c1b890bd47e8fa2ce2dbedd649
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chalf_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor chalf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bfc3a49df65860e9c885dc2a3a451e1c4ff73aac
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor coalesce(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e06afdcd8cf7d593339d8966e79c4b4df8286f30
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/count_nonzero_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3f7144b52f5240c70a0041c2de941099c7e31bf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cudnn_grid_sampler {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & grid);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid);
+};
+
+struct TORCH_API cudnn_grid_sampler_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/det.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/det.h
new file mode 100644
index 0000000000000000000000000000000000000000..44a93668d22086cfb8a9dbd3f128b575f9be396c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/det.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/det_ops.h>
+
+namespace at {
+
+
+// aten::det(Tensor self) -> Tensor
+inline at::Tensor det(const at::Tensor & self) {
+    return at::_ops::det::call(self);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_copy_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..404299ccc2653d5a2a67613e591d1d3de1e40794
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API diagonal_copy {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2);
+};
+
+struct TORCH_API diagonal_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::diagonal_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70d43b3cb05987c6f9241a9bf73e371eada71ddb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..695380a10a49f0f1b7426068437649a8e91969c6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diagonal_scatter_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & diagonal_scatter_out(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+TORCH_API at::Tensor diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/elu_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/elu_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c6424a81ee14802be342025640cb958528ab458
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/elu_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/elu_backward_ops.h>
+
+namespace at {
+
+
+// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & elu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
+    return at::_ops::elu_backward_grad_input::call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
+}
+// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & elu_backward_outf(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
+    return at::_ops::elu_backward_grad_input::call(grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
+}
+
+// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
+inline at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
+    return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd92f267b467ec3b7cac5677725345c41d90cb5a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+TORCH_API at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..667a2d7bf3b73e359f8509458766d0b5da50c1ef
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API geometric_ { + using schema = at::Tensor & (at::Tensor &, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::geometric_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, double p, ::std::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional generator); +}; + +struct TORCH_API geometric_out { + using schema = at::Tensor & (const at::Tensor &, double, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::geometric") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out); +}; + +struct TORCH_API geometric { + using schema = at::Tensor (const at::Tensor &, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::geometric") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, double p, ::std::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional generator); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9abfc1d4fb5f5547f5e2ce35f8123c7c4c8de949 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_jvp_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e5bb8049724285cc8faf8b1143b840d41ee9d09
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, int64_t sections);
+TORCH_API ::std::vector<at::Tensor> hsplit(const at::Tensor & self, at::IntArrayRef indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..802b83632fb36510e1859ba333f67df84360298c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igammac_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
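// [editorial note] The dispatch headers above and below expose one operator per
// backend namespace (at::cpu::glu_jvp here, at::cuda::igammac next). These entry
// points are assumed to bypass dispatcher routing and call the backend kernel
// directly, so inputs must already live on the matching device; the generic
// at::igammac / at::glu_jvp wrappers dispatch on the tensors' keys instead.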
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igammac(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igammac_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igammac_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/inverse.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/inverse.h new file mode 100644 index 0000000000000000000000000000000000000000..43ce59b51f6576b358c9e05055b6d4bcf7920c9e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/inverse.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::inverse(Tensor self) -> Tensor +inline at::Tensor inverse(const at::Tensor & self) { + return at::_ops::inverse::call(self); +} + +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::inverse_out::call(self, out); +} +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & inverse_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::inverse_out::call(self, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7903efd51345cce8adc6ff4af99bfda9969d9a96 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4fc0beb7820d8697067cc8ddcdb246bf6323e5d8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_eigvals_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
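// [editorial note] In every at::_ops descriptor below, `call` is the normal
// dispatcher entry point and `redispatch` re-enters dispatch with an explicit
// c10::DispatchKeySet, which layered keys (autograd, tracing, functionalization)
// use to continue below themselves. Equivalence sketch for a hypothetical `m`:
//
//   at::Tensor v1 = at::linalg_eigvals(m);              // public API
//   at::Tensor v2 = at::_ops::linalg_eigvals::call(m);  // what it forwards to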
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eigvals { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eigvals") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eigvals(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API linalg_eigvals_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eigvals") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9b74f9aec1da4fc6a5d5060dee1704b5bea050de --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_ldl_factor_ex_out : public at::meta::structured_linalg_ldl_factor_ex { +void impl(const at::Tensor & self, bool hermitian, bool check_errors, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10.h new file mode 100644 index 0000000000000000000000000000000000000000..5929193ceacf67f7a12c6c28d6841dcfc6ea0a33 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::log10(Tensor self) -> Tensor +inline at::Tensor log10(const at::Tensor & self) { + return at::_ops::log10::call(self); +} + +// aten::log10_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & log10_(at::Tensor & self) { + return at::_ops::log10_::call(self); +} + +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::log10_out::call(self, out); +} +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::log10_out::call(self, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..54cf78c85c873f3289c6ae1131aba0425753ec34 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode); +} + +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); +} +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::max_pool2d_backward_out::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean.h new file mode 100644 index 0000000000000000000000000000000000000000..c7ed0679d38c03ef35df127e7f4cc2cc4667ad78 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean::call(self, dtype); +} + +// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean_dtype_out::call(self, dtype, out); +} +// aten::mean.dtype_out(Tensor self, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_outf(const at::Tensor & self, ::std::optional dtype, at::Tensor & out) { + return at::_ops::mean_dtype_out::call(self, dtype, out); +} + +// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} + +// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional dtype=::std::nullopt) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype, at::Tensor & out) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e4e7f57b5913a376d7fa89e447a7d22300fbe1c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mean_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
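// [editorial sketch, not generated code] The mean overloads above mirror the
// schema overloads: full reduction with an optional accumulation dtype, a
// dim-list reduction, and a named-dimension reduction. Hypothetical usage:
//
//   at::Tensor x  = at::rand({2, 3});
//   at::Tensor m0 = at::mean(x);                                   // 0-dim result
//   at::Tensor m1 = at::mean(x, /*dim=*/{1}, /*keepdim=*/false);   // shape [2]
//   at::Tensor m2 = at::mean(x, {1}, false, at::kDouble);          // accumulate in double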
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor mean(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/meshgrid_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/meshgrid_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae79a206c288214e56356ac2da0d0f6618b8a823
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/meshgrid_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API meshgrid {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::meshgrid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "meshgrid(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API meshgrid_indexing {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::meshgrid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "indexing")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors, c10::string_view indexing);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/min.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/min.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2baca13949bf6fd029225c54d66a4a79f72f88f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/min.h
@@ -0,0 +1,81 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/min_ops.h>
+
+namespace at {
+
+
+// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false) {
+    return at::_ops::min_dim::call(self, dim, keepdim);
+}
+
+// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!)
indices) +inline ::std::tuple min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices); +} +// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_dim_min::call(self, dim, keepdim, min, min_indices); +} + +// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple min(const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim::call(self, dim, keepdim); +} + +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices); +} +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +inline ::std::tuple min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_names_dim_min::call(self, dim, keepdim, min, min_indices); +} + +// aten::min(Tensor self) -> Tensor +inline at::Tensor min(const at::Tensor & self) { + return at::_ops::min::call(self); +} + +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::min_unary_out::call(self, out); +} +// aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::min_unary_out::call(self, out); +} + +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_out::call(self, other, out); +} +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::min_out::call(self, other, out); +} + +// aten::min.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor min(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_other::call(self, other); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..a84b547b19e076a80dd1f09e8dcd9d04392e5fd4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
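// [editorial sketch, not generated code] `reduction` below is the integer-valued
// at::Reduction enum, defaulting to Mean. Hypothetical usage:
//
//   at::Tensor pred = at::rand({8});
//   at::Tensor tgt  = at::rand({8});
//   at::Tensor mean_loss = at::mse_loss(pred, tgt);                      // scalar
//   at::Tensor per_elem  = at::mse_loss(pred, tgt, at::Reduction::None); // shape [8]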
+inline at::Tensor & mse_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss_out::call(self, target, reduction, out); +} +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mse_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) { + return at::_ops::mse_loss_out::call(self, target, reduction, out); +} + +// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +inline at::Tensor mse_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) { + return at::_ops::mse_loss::call(self, target, reduction); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nanmedian_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nanmedian_native.h new file mode 100644 index 0000000000000000000000000000000000000000..059b1a0f69f9910e64944df08079583851d40810 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nanmedian_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & nanmedian_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor nanmedian_cpu(const at::Tensor & self); +TORCH_API at::Tensor nanmedian_cuda(const at::Tensor & self); +TORCH_API ::std::tuple nanmedian(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple nanmedian_out_cpu(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple nanmedian_out_cuda(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple nanmedian_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b2094e1a0857358494f1adcfd35329181303485c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
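// [editorial note] Each struct in the file that follows is a torchgen operator
// descriptor: `name` plus `overload_name` identify the registered overload
// ("aten::quantize_per_tensor" vs. "aten::quantize_per_tensor.tensor_qparams"),
// and `schema_str` is the exact registration string, so the C++ `schema` type
// must stay in sync with it (float -> double, int -> int64_t,
// Tensor[] -> at::TensorList, and so on).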
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API quantize_per_tensor {
+  using schema = at::Tensor (const at::Tensor &, double, int64_t, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype);
+};
+
+struct TORCH_API quantize_per_tensor_tensor_qparams {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_qparams")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype);
+};
+
+struct TORCH_API quantize_per_tensor_tensors {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Tensor &, const at::Tensor &, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype);
+};
+
+struct TORCH_API quantize_per_tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, double, int64_t, at::ScalarType, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!)
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out); +}; + +struct TORCH_API quantize_per_tensor_tensor_qparams_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::ScalarType, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensor_qparams_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out); +}; + +struct TORCH_API quantize_per_tensor_tensors_out { + using schema = void (at::TensorList, const at::Tensor &, const at::Tensor &, at::ScalarType, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rad2deg_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rad2deg_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..805558d8427bff383ef6f595d27e927f9c02bb6b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rad2deg_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
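// [editorial sketch, not generated code] The trailing underscore marks the
// in-place overload: aten::rad2deg_ mutates `self`, and is registered separately
// from functional aten::rad2deg and its `.out` variant. For a hypothetical `r`:
//
//   at::Tensor r = at::rand({2});
//   at::rad2deg_(r);   // same storage, values rescaled from radians to degrees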
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rad2deg { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API rad2deg_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API rad2deg_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rad2deg") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn.h new file mode 100644 index 0000000000000000000000000000000000000000..af214ecf900210c5c9e5a06fe6e0e20ad9b9a766 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn.h @@ -0,0 +1,377 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn::call(size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn::call(size, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
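// [editorial sketch, not generated code] Pattern of the out-variants that follow:
// `randn_out` takes `out` first, `randn_outf` takes it last, and the `_symint`
// spellings accept c10::SymIntArrayRef sizes. Hypothetical usage:
//
//   at::Tensor buf = at::empty({2, 2});
//   at::randn_out(buf, {2, 2});    // fills buf in place
//   at::randn_outf({2, 2}, buf);   // identical effect, schema argument order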
+inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::randn_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::randn_out::call(size, out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(size, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional generator) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional generator) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional generator) { + return at::_ops::randn_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional generator) { + return at::_ops::randn_generator_out::call(size, generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(size, generator, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional names) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional names) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional names) { + return at::_ops::randn_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional names) { + return at::_ops::randn_names_out::call(size, names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(size, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional generator, ::std::optional names) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, ::std::optional generator, ::std::optional names) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/renorm_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/renorm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1467eb00dd6549a13628f5a66adf6a000a41f210
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/renorm_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+TORCH_API at::Tensor & renorm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+TORCH_API at::Tensor & renorm_outf(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out);
+TORCH_API at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm);
+
+} // namespace meta
+} // namespace at
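For reference, a minimal sketch of calling renorm through the public API (illustrative names; the at::meta:: declarations above are the shape-only dispatch path, not what user code normally calls):

    #include <ATen/ATen.h>

    void clamp_row_norms() {
      // Renormalize each sub-tensor along dim 0 (each row of a 4x3 matrix)
      // so that no row has an L2 norm greater than 1.0.
      at::Tensor x = at::randn({4, 3});
      at::Tensor y = at::renorm(x, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);
    }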
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f428a1d5c816f147904bef6bd61780ad852bce10
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/select_scatter_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
+TORCH_API at::Tensor select_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cc3d056dd4a43cbaa84b1766f7feeca81968e81
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sigmoid_backward_out : public at::meta::structured_sigmoid_backward {
+  void impl(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1c80a8f4607835f9813d8a371fb6994e4ac6a0a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & softshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & softshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a628dd8859cb3e9985839a13ab557b0dcb007256
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_entr(const at::Tensor & self);
+TORCH_API at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
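For reference, a minimal sketch of the softshrink operator declared above, showing the defaulted lambd argument (illustrative names; assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void shrink_activations() {
      // softshrink maps x to x - lambd for x > lambd, x + lambd for
      // x < -lambd, and 0 otherwise; lambd defaults to 0.5 as declared.
      at::Tensor x = at::randn({8});
      at::Tensor y = at::softshrink(x);              // lambd = 0.5
      at::Tensor z = at::softshrink(x, /*lambd=*/0.1);
    }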
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..59e7ef786adbd9e139188dd766572feffb7d3d23
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_erfcx : public TensorIteratorBase {
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e51c50a1a056fb7b8602952fd7720b11f5feced
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_log_ndtr(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..b52322581bbb5a6ef10c71b910d4b544b5b1b9a5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::special_log_softmax::call(self, dim, dtype);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e63e5eb122f6e93169b0ce13982e69e777559629
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k0_out(at::Tensor & out, const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k0_outf(const at::Tensor & x, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..146e648fdbe2279624b516e442315abae1805c8c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_zeta : public TensorIteratorBase {
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bca11851f694d0231655960d229f0892834c8d0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum::call(self, dtype);
+}
+
+// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
+}
+
+// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
+}
+
+// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum_IntList_out::call(self, dim, keepdim, dtype, out);
+}
+// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::sum_IntList_out::call(self, dim, keepdim, dtype, out);
+}
+
+// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum_DimnameList_out::call(self, dim, keepdim, dtype, out);
+}
+// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::sum_DimnameList_out::call(self, dim, keepdim, dtype, out);
+}
+
+// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::sum_out::call(self, dtype, out);
+}
+// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sum_outf(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::sum_out::call(self, dtype, out);
+}
+
+}
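For reference, a minimal sketch of the sum overloads declared above, covering the dim, keepdim, and dtype parameters (illustrative names; assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void reduce_examples() {
      at::Tensor x = at::ones({2, 3});
      at::Tensor total = at::sum(x);                          // scalar tensor, value 6
      at::Tensor rows  = at::sum(x, /*dim=*/{1});             // shape [2]
      at::Tensor kept  = at::sum(x, {1}, /*keepdim=*/true);   // shape [2, 1]
      at::Tensor asI64 = at::sum(x, {1}, false, at::kLong);   // accumulate as int64
    }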
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/svd.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/svd.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd066913b1c40a2982afe7a5c60a41452e912512
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/svd.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some=true, bool compute_uv=true) {
+    return at::_ops::svd_U::call(self, some, compute_uv, U, S, V);
+}
+// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) {
+    return at::_ops::svd_U::call(self, some, compute_uv, U, S, V);
+}
+
+// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some=true, bool compute_uv=true) {
+    return at::_ops::svd::call(self, some, compute_uv);
+}
+
+}
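For reference, a minimal sketch of the two calling styles declared above (illustrative names; note at::svd is the legacy entry point, with at::linalg_svd being the modern equivalent):

    #include <ATen/ATen.h>

    void factorize() {
      at::Tensor a = at::randn({5, 3});
      // Functional form: returns (U, S, V) by value.
      auto [U, S, V] = at::svd(a, /*some=*/true, /*compute_uv=*/true);
      // Out form: writes into preallocated tensors instead.
      at::Tensor U2 = at::empty({0}), S2 = at::empty({0}), V2 = at::empty({0});
      at::svd_out(U2, S2, V2, a);
    }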
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csc_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d70f49333a0dbc3c0caadb0dbee43b8d2104e73
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csc_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor to_sparse_csc(const at::Tensor & self, ::std::optional<int64_t> dense_dim=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/transpose_copy.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/transpose_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..777ff5ef356952c9591f752aa5abdbdcf80d34d4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/transpose_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
+inline at::Tensor transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::transpose_copy_int::call(self, dim0, dim1);
+}
+
+// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & transpose_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
+}
+// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & transpose_copy_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+    return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1792b2da9fcb5c12b812b19eba1a0ec119781413
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ea664f3c1403ff9bc1422ed865f97165ae4a461
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+TORCH_API at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff262fbb8d32d55c4172438926901bf79123e459
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*output_size)) : ::std::nullopt, align_corners, scale_factors);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*output_size)) : ::std::nullopt, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors) {
+    return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out) {
+    return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+  }
+}
+
+// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, c10::fromIntArrayRefSlow(output_size), align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_trilinear3d(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt) {
+    return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
+  }
+}
+
+}
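For reference, a minimal sketch of the functional overload declared above, using an explicit output size (illustrative names; assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void upsample_volume() {
      // Trilinear upsampling expects a 5-D NCDHW input.
      at::Tensor input = at::randn({1, 1, 4, 4, 4});
      // Explicit (D, H, W) output size; the per-axis scale overrides
      // scales_d/scales_h/scales_w are left at their nullopt defaults.
      at::Tensor bigger = at::upsample_trilinear3d(
          input, /*output_size=*/{8, 8, 8}, /*align_corners=*/false);
    }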
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca57511cb42bc27bd05e434a80937e49584a362c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor view_as(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xlogy.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xlogy.h
new file mode 100644
index 0000000000000000000000000000000000000000..e9e22a2d848a466946b04961df178bf1cd2334af
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xlogy.h
@@ -0,0 +1,77 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::xlogy_Tensor::call(self, other);
+}
+
+// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
+inline at::Tensor xlogy(const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::xlogy_Scalar_Self::call(self, other);
+}
+
+// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
+inline at::Tensor xlogy(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::xlogy_Scalar_Other::call(self, other);
+}
+
+// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::xlogy__Tensor::call(self, other);
+}
+
+// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & xlogy_(at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::xlogy__Scalar_Other::call(self, other);
+}
+
+// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::xlogy_OutTensor::call(self, other, out);
+}
+// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::xlogy_OutTensor::call(self, other, out);
+}
+
+// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::xlogy_OutScalar_Self::call(self, other, out);
+}
+// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::xlogy_OutScalar_Self::call(self, other, out);
+}
+
+// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::xlogy_OutScalar_Other::call(self, other, out);
+}
+// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & xlogy_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::xlogy_OutScalar_Other::call(self, other, out);
+}
+
+}
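For reference, a minimal sketch of the xlogy overload set declared above (illustrative names; assumes <ATen/ATen.h>):

    #include <ATen/ATen.h>

    void xlogy_variants() {
      at::Tensor x = at::rand({4});
      at::Tensor y = at::rand({4});
      at::Tensor a = at::xlogy(x, y);    // Tensor-Tensor overload
      at::Tensor b = at::xlogy(2.0, y);  // Scalar_Self overload
      at::Tensor c = at::xlogy(x, 3.0);  // Scalar_Other overload
      at::xlogy_(x, y);                  // in-place, writes into x
    }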
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..73289b3182d5aa422910188268e280c1af4c936f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ixor__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __ixor__(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at