from . import functional
# --- end of file: ivy/ivy/functional/frontends/paddle/nn/__init__.py ---
# local
from ..attribute import * # noqa: F401
# --- end of file: ivy/ivy/functional/frontends/paddle/tensor/attribute.py ---
import ivy
import ivy.functional.frontends.pandas.series as series
from typing import Iterable
class Index:
def __init__(self, data, dtype=None, copy=False, name=None, tupleize_cols=True):
self.index = data
self.tokens = None
if not isinstance(data, ivy.Array):
try:
self.index_array = ivy.array(data, dtype=dtype)
except ivy.utils.exceptions.IvyBackendException:
# labels as strings
if isinstance(data, (list, tuple)):
self.tokens = data
self.index_array = Index._tokenize_1d(data)
else:
# todo: handle other cases
raise NotImplementedError
else:
self.index_array = data
self.tokens_exist = self.tokens is not None
self.dtype = dtype
self.name = name
self.copy = copy
self.tupleize_cols = tupleize_cols
@staticmethod
    def _tokenize_1d(x: Iterable):
        # map arbitrary (e.g. string) labels to integer tokens 0, 1, ..., n-1
        return ivy.array([v for v, _ in enumerate(x)])
def __repr__(self):
if self.tokens_exist:
return f"Index({list(self.tokens)})"
return f"Index({self.index_array.to_list()})"
def __getitem__(self, item):
if self.tokens_exist:
if isinstance(item, (list, tuple)):
                return Index([self.tokens[i] for i in item])
return self.tokens[item]
elif isinstance(item, (list, tuple)):
return Index(self.index_array[item])
return self.index_array[item]
def __len__(self):
return len(self.index_array)
def __iter__(self):
return iter(self.index_array.to_list())
@property
def ndim(self):
return self.index_array.ndim
@property
def size(self):
return self.index_array.size
@property
def array(self):
return self.index_array
@property
def shape(self):
return tuple(self.index_array.shape)
@property
def has_duplicates(self):
return not self.is_unique()
    def unique(self, level=None):
        # todo: handle level with a MultiIndex
        uniques = ivy.unique_values(self.index_array)
        return Index(uniques, dtype=self.dtype, copy=self.copy, name=self.name)
    def is_unique(self):
        uniques = ivy.unique_values(self.index_array)
        return len(uniques) == len(self.index_array)
def to_list(self):
return self.index_array.to_list()
def to_numpy(self, dtype=None, copy=False, na_value=ivy.nan, **kwargs):
if dtype:
return self.index_array.astype(dtype).to_numpy(copy=copy)
return self.index_array.to_numpy(copy=copy)
def to_series(self, index=None, name=None):
if index is None:
index = self.index_array
return series.Series(index, index=index, name=name)
def min(self, axis=None, skipna=True, *args, **kwargs):
return self.index_array.min()
def max(self, axis=None, skipna=True, *args, **kwargs):
return self.index_array.max()
    def isin(self, values, level=None):
        # todo: handle level with a MultiIndex
return ivy.isin(self.index_array, values)
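# Minimal usage sketch for the Index frontend above (assumes the numpy backend
# is installed; guarded so the module stays import-safe):
if __name__ == "__main__":
    ivy.set_backend("numpy")
    idx = Index([3, 1, 3, 2])
    print(len(idx))         # 4
    print(idx.is_unique())  # False: 3 appears twice
    print(idx.unique())     # Index over the distinct values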
# --- end of file: ivy/ivy/functional/frontends/pandas/index.py ---
from . import _samples_generator
from ._samples_generator import *
# --- end of file: ivy/ivy/functional/frontends/sklearn/datasets/__init__.py ---
# global
import sys
# local
import ivy
from ivy.utils.exceptions import handle_exceptions
from ivy.functional.frontends import set_frontend_to_specific_version
from numbers import Number
from typing import Union, Tuple, Iterable
from .dtypes import DType
# Constructing dtypes is required because ivy.<dtype>
# changes dynamically with the backend and may not be available
tensorflow_enum_to_type = {
1: ivy.FloatDtype("float32"),
2: ivy.FloatDtype("float64"),
3: ivy.IntDtype("int32"),
4: ivy.UintDtype("uint8"),
5: ivy.IntDtype("int16"),
6: ivy.IntDtype("int8"),
8: ivy.ComplexDtype("complex64"),
9: ivy.IntDtype("int64"),
10: ivy.Dtype("bool"),
14: ivy.FloatDtype("bfloat16"),
17: ivy.UintDtype("uint16"),
18: ivy.ComplexDtype("complex128"),
19: ivy.FloatDtype("float16"),
22: ivy.UintDtype("uint32"),
23: ivy.UintDtype("uint64"),
}
tensorflow_type_to_enum = {v: k for k, v in tensorflow_enum_to_type.items()}
float32 = DType(1)
float64 = DType(2)
int32 = DType(3)
uint8 = DType(4)
int16 = DType(5)
int8 = DType(6)
int64 = DType(9)
bool = DType(10)
bfloat16 = DType(14)
uint16 = DType(17)
float16 = DType(19)
uint32 = DType(22)
uint64 = DType(23)
# type aliases
double = float64
half = float16
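# A round-trip sketch over the two mappings above (illustration only; they
# are inverses of one another by construction):
#   tensorflow_type_to_enum[ivy.FloatDtype("float32")]  # -> 1
#   tensorflow_enum_to_type[1]                          # -> float32 dtype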
@handle_exceptions
def check_tensorflow_casting(x1, x2):
"""Check whether the two arguments provided in the function have the same
dtype, unless one of them is an array_like or scalar, where it gets casted
to the other input's dtype.
Parameters
----------
x1
First argument which can be tensor, array_like or scalar
x2
Second argument which can be tensor, array_like or scalar
Returns
-------
x1
First tensor promoted accordingly.
x2
Second tensor promoted accordingly.
"""
if hasattr(x1, "dtype") and not hasattr(x2, "dtype"):
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2, dtype=x1.dtype)
elif hasattr(x2, "dtype") and not hasattr(x1, "dtype"):
x2 = ivy.asarray(x2)
x1 = ivy.asarray(x1, dtype=x2.dtype)
else:
x1 = ivy.asarray(x1)
if not hasattr(x2, "dtype"):
x2 = ivy.asarray(x2, dtype=x1.dtype)
ivy.utils.assertions.check_same_dtype(x1, x2)
return x1, x2
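# Illustration (a hedged sketch, not part of the original file): mixing a
# tensor with a plain Python scalar casts the scalar to the tensor's dtype
# rather than promoting both operands:
#   x = ivy.asarray([1.0, 2.0])            # e.g. float32
#   a, b = check_tensorflow_casting(x, 3)  # b becomes a float32 tensor too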
from . import dtypes
from .dtypes import as_dtype, cast
from . import ragged
from .ragged import *
from . import tensor
from .tensor import EagerTensor, Tensor
from .tensorarray import TensorArray
from . import variable
from .variable import Variable, IndexedSlices
from .python.ops.resource_variable_ops import ResourceVariable
from . import keras
from . import compat
from . import image
from . import linalg
from .linalg import matmul, tensordot, eig, eye, norm
from . import math
from .math import *
from . import nest
from . import nn
from . import __operators__
from . import quantization
from . import random
from . import general_functions
from .general_functions import *
from . import raw_ops
from . import sets
from . import signal
from . import sparse
_frontend_array = constant
# setting to specific version #
# --------------------------- #
if ivy.is_local():
module = ivy.utils._importlib.import_cache[__name__]
else:
module = sys.modules[__name__]
__version__ = set_frontend_to_specific_version(module)
# --- end of file: ivy/ivy/functional/frontends/tensorflow/__init__.py ---
# global
# local
import ivy
import ivy.functional.frontends.tensorflow as tf_frontend
class Variable:
def __init__(self, array, trainable=True, name=None, dtype=None):
self._ivy_array = (
ivy.array(array) if not isinstance(array, ivy.Array) else array
)
self._ivy_array = (
ivy.astype(self._ivy_array, dtype) if dtype is not None else self._ivy_array
)
self.trainable = trainable
def __repr__(self):
return (
repr(self._ivy_array).replace(
"ivy.array", "ivy.frontends.tensorflow.Variable"
)[:-1]
+ ", shape="
+ str(self._ivy_array.shape)
+ ", dtype="
+ str(self._ivy_array.dtype)
+ ")"
)
# Properties #
# ---------- #
@property
def ivy_array(self):
return self._ivy_array
@property
def device(self):
return self._ivy_array.device
@property
def dtype(self):
return tf_frontend.DType(
tf_frontend.tensorflow_type_to_enum[self._ivy_array.dtype]
)
@property
def shape(self):
return self._ivy_array.shape
# Instance Methods #
# ---------------- #
def assign(self, value, use_locking=None, name=None, read_value=True):
ivy.utils.assertions.check_equal(
value.ivy_array.shape if hasattr(value, "ivy_array") else ivy.shape(value),
self.shape,
as_array=False,
)
self._ivy_array = value._ivy_array
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
ivy.utils.assertions.check_equal(
delta.ivy_array.shape if hasattr(delta, "ivy_array") else ivy.shape(delta),
self.shape,
as_array=False,
)
self._ivy_array = tf_frontend.math.add(self._ivy_array, delta._ivy_array)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
ivy.utils.assertions.check_equal(
delta.ivy_array.shape if hasattr(delta, "ivy_array") else ivy.shape(delta),
self.shape,
as_array=False,
)
self._ivy_array = tf_frontend.math.subtract(self._ivy_array, delta._ivy_array)
def batch_scatter_update(
self, sparse_delta, use_locking=None, name=None, read_value=True
):
pass
def gather_nd(self, indices, name=None):
return tf_frontend.gather_nd(params=self._ivy_array, indices=indices)
def read_value(self):
return tf_frontend.Tensor(self._ivy_array)
def scatter_add(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_div(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_max(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_min(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_mul(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_nd_add(self, indices, updates, use_locking=None, name=None):
pass
def scatter_nd_sub(self, indices, updates, use_locking=None, name=None):
pass
def scatter_nd_update(self, indices, updates, use_locking=None, name=None):
pass
def scatter_sub(self, sparse_delta, use_locking=None, name=None, read_value=True):
pass
def scatter_update(
self, sparse_delta, use_locking=None, name=None, read_value=True
):
pass
def set_shape(self, shape):
if shape is None:
return
x_shape = self._ivy_array.shape
if len(x_shape) != len(shape):
raise ValueError(
f"Tensor's shape {x_shape} is not compatible with supplied shape "
f"{shape}."
)
for i, v in enumerate(x_shape):
if v != shape[i] and (shape[i] is not None):
raise ValueError(
f"Tensor's shape {x_shape} is not compatible with supplied shape "
f"{shape}."
)
def get_shape(self):
return self._ivy_array.shape
def sparse_read(self, indices, name=None):
pass
def __add__(self, y, name="add"):
return self.__radd__(y)
def __div__(self, x, name="div"):
return tf_frontend.math.divide(x, self._ivy_array, name=name)
def __and__(self, y, name="and"):
return y.__rand__(self._ivy_array)
def __eq__(self, other):
return tf_frontend.raw_ops.Equal(
x=self._ivy_array, y=other, incompatible_shape_error=False
)
def __floordiv__(self, y, name="floordiv"):
return y.__rfloordiv__(self._ivy_array)
def __ge__(self, y, name="ge"):
return tf_frontend.raw_ops.GreaterEqual(
x=self._ivy_array, y=y._ivy_array, name=name
)
def __getitem__(self, slice_spec, var=None, name="getitem"):
ret = ivy.get_item(self._ivy_array, slice_spec)
return Variable(ivy.array(ret, dtype=ivy.dtype(ret), copy=False))
def __gt__(self, y, name="gt"):
return tf_frontend.raw_ops.Greater(x=self._ivy_array, y=y._ivy_array, name=name)
def __invert__(self, name="invert"):
return tf_frontend.raw_ops.Invert(x=self._ivy_array, name=name)
def __le__(self, y, name="le"):
return tf_frontend.raw_ops.LessEqual(
x=self._ivy_array, y=y._ivy_array, name=name
)
def __lt__(self, y, name="lt"):
return tf_frontend.raw_ops.Less(x=self._ivy_array, y=y._ivy_array, name=name)
def __matmul__(self, y, name="matmul"):
return y.__rmatmul__(self._ivy_array)
def __mul__(self, x, name="mul"):
return tf_frontend.math.multiply(x, self._ivy_array, name=name)
def __mod__(self, x, name="mod"):
return tf_frontend.math.mod(x, self._ivy_array, name=name)
def __ne__(self, other):
return tf_frontend.raw_ops.NotEqual(
x=self._ivy_array, y=other._ivy_array, incompatible_shape_error=False
)
def __neg__(self, name="neg"):
return tf_frontend.raw_ops.Neg(x=self._ivy_array, name=name)
def __or__(self, y, name="or"):
return y.__ror__(self._ivy_array)
def __pow__(self, y, name="pow"):
return tf_frontend.math.pow(x=self, y=y, name=name)
def __radd__(self, x, name="radd"):
return tf_frontend.math.add(x, self._ivy_array, name=name)
def __rand__(self, x, name="rand"):
return tf_frontend.math.logical_and(x, self._ivy_array, name=name)
def __rfloordiv__(self, x, name="rfloordiv"):
return tf_frontend.raw_ops.FloorDiv(x=x, y=self._ivy_array, name=name)
def __rmatmul__(self, x, name="rmatmul"):
return tf_frontend.raw_ops.MatMul(a=x, b=self._ivy_array, name=name)
def __rmul__(self, x, name="rmul"):
return tf_frontend.raw_ops.Mul(x=x, y=self._ivy_array, name=name)
def __ror__(self, x, name="ror"):
return tf_frontend.raw_ops.LogicalOr(x=x, y=self._ivy_array, name=name)
def __rpow__(self, x, name="rpow"):
return tf_frontend.raw_ops.Pow(x=x, y=self._ivy_array, name=name)
def __rsub__(self, x, name="rsub"):
return tf_frontend.math.subtract(x, self._ivy_array, name=name)
def __rtruediv__(self, x, name="rtruediv"):
return tf_frontend.math.truediv(x, self._ivy_array, name=name)
def __rxor__(self, x, name="rxor"):
return tf_frontend.math.logical_xor(x, self._ivy_array, name=name)
def __sub__(self, y, name="sub"):
return y.__rsub__(self._ivy_array)
def __truediv__(self, y, name="truediv"):
dtype = ivy.dtype(self._ivy_array)
if dtype in [ivy.uint8, ivy.int8, ivy.uint16, ivy.int16]:
return ivy.astype(y, ivy.float32).__rtruediv__(
ivy.astype(self._ivy_array, ivy.float32)
)
if dtype in [ivy.uint32, ivy.int32, ivy.uint64, ivy.int64]:
return ivy.astype(y, ivy.float64).__rtruediv__(
ivy.astype(self._ivy_array, ivy.float64)
)
return y.__rtruediv__(self._ivy_array)
def __xor__(self, y, name="xor"):
return y.__rxor__(self._ivy_array)
def __setitem__(self, key, value):
raise ivy.utils.exceptions.IvyException(
"ivy.functional.frontends.tensorflow.Variable object "
"doesn't support assignment"
)
class IndexedSlices:
def __init__(self, values, indices, dense_shape=None):
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense
tensor."""
return self._dense_shape
@property
def device(self):
"""The name of the device on which `values` will be produced, or
`None`."""
return self.values.device
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
def __repr__(self):
return "IndexedSlices(\nindices=%s,\nvalues=%s%s\n)" % (
self._indices,
self._values,
(
f", dense_shape={self._dense_shape}"
if self._dense_shape is not None
else ""
),
)
def __neg__(self):
return IndexedSlices(-self._values, self._indices, self._dense_shape)
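# Minimal usage sketch for the classes above (assumes the numpy backend is
# installed; guarded so the module stays import-safe):
if __name__ == "__main__":
    ivy.set_backend("numpy")
    v = Variable(ivy.array([1.0, 2.0, 3.0]))
    print(v.shape, v.trainable)  # (3,) True
    v.assign_add(Variable(ivy.array([0.5, 0.5, 0.5])))  # in-place, via tf_frontend.math.add
    slices = IndexedSlices(values=ivy.array([[1.0, 2.0]]), indices=ivy.array([0]))
    print(slices)  # repr shows indices, values and the optional dense_shape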
# --- end of file: ivy/ivy/functional/frontends/tensorflow/variable.py ---
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
# ToDo: this function will be simplified once ivy.alpha_dropout is implemented
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def alpha_dropout(input, p=0.5, training=False, inplace=False):
if p == 0.0 or not training or input.shape == () or input.shape == (0,):
return input
neg_saturation = ivy.log1p(ivy.exp(-ivy.square(input)))
mask = ivy.where(
ivy.random_uniform(shape=input.shape, device=ivy.dev(input)) < p,
0.0,
1.0,
)
if inplace:
ivy.inplace_update(input, mask * input + (1 - mask) * neg_saturation)
ivy.inplace_update(input, input / ivy.sqrt(1 - p / (1 - p + 1e-5)))
return input
else:
masked = mask * input + (1 - mask) * neg_saturation
return masked / ivy.sqrt(1 - p / (1 - p + 1e-5))
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def dropout(input, p=0.5, training=True, inplace=False):
return ivy.dropout(input, p, scale=True, training=training)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def dropout1d(input, p=0.5, training=True, inplace=False):
if inplace:
return ivy.dropout1d(input, p, training=training, data_format="NCW", out=input)
return ivy.dropout1d(input, p, training=training, data_format="NCW")
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, "torch")
def dropout2d(input, p=0.5, training=True, inplace=False):
if input.ndim < 2:
raise ValueError("Feature dropout requires at least 2 dimensions in the input")
ret = ivy.dropout2d(input, p, training=training, data_format="NCHW")
if inplace:
ivy.inplace_update(input, ret)
return input
return ret
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def dropout3d(input, p=0.5, training=True, inplace=False):
if inplace:
return ivy.dropout3d(
input, p, training=training, data_format="NDHWC", out=input
)
return ivy.dropout3d(input, p, training=training, data_format="NDHWC")
# --- end of file: ivy/ivy/functional/frontends/torch/nn/functional/dropout_functions.py ---
import ivy
from ivy.func_wrapper import (
with_unsupported_dtypes,
)
from ivy.functional.frontends.torch.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
@to_ivy_arrays_and_back
def erfc(input, *, out=None):
return 1.0 - ivy.erf(input, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def erfcx(input, *, out=None):
ret = erfc(input) * ivy.exp(input**2)
return ret
@to_ivy_arrays_and_back
def erfinv(input, *, out=None):
return ivy.erfinv(input, out=out)
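# Spot-check sketch (hedged): erf(0) = 0, so both definitions above give 1 at
# zero, since erfcx(0) = erfc(0) * exp(0):
#   erfc(ivy.array([0.0]))   # -> [1.0]
#   erfcx(ivy.array([0.0]))  # -> [1.0]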
# --- end of file: ivy/ivy/functional/frontends/torch/special/special_funcs.py ---
import ivy
from ivy.functional.frontends.sklearn.base import BaseEstimator as XGBModelBase
from ivy.functional.frontends.sklearn.base import ClassifierMixin as XGBClassifierBase
from .training import train
from .core import Booster
class XGBModel(XGBModelBase):
def __init__(
self,
max_depth=None,
max_leaves=None,
max_bin=None,
grow_policy=None,
learning_rate=None,
n_estimators=None,
verbosity=None,
objective=None,
booster=None,
tree_method=None,
n_jobs=None,
gamma=None,
min_child_weight=None,
max_delta_step=None,
subsample=None,
sampling_method=None,
colsample_bytree=None,
colsample_bylevel=None,
colsample_bynode=None,
reg_alpha=None,
reg_lambda=None,
scale_pos_weight=None,
base_score=None,
random_state=None,
missing=None,
num_parallel_tree=None,
monotone_constraints=None,
interaction_constraints=None,
importance_type=None,
device=None,
validate_parameters=None,
enable_categorical=False,
feature_types=None,
max_cat_to_onehot=None,
max_cat_threshold=None,
multi_strategy=None,
eval_metric=None,
early_stopping_rounds=None,
callbacks=None,
**kwargs,
):
self.n_estimators = n_estimators
self.objective = objective
self.max_depth = max_depth
self.max_leaves = max_leaves
self.max_bin = max_bin
self.grow_policy = grow_policy
self.learning_rate = learning_rate
self.verbosity = verbosity
self.booster = booster
self.tree_method = tree_method
self.gamma = gamma
self.min_child_weight = min_child_weight
self.max_delta_step = max_delta_step
self.subsample = subsample
self.sampling_method = sampling_method
self.colsample_bytree = colsample_bytree
self.colsample_bylevel = colsample_bylevel
self.colsample_bynode = colsample_bynode
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.scale_pos_weight = scale_pos_weight
self.base_score = base_score
self.missing = missing
self.num_parallel_tree = num_parallel_tree
self.random_state = random_state
self.n_jobs = n_jobs
self.monotone_constraints = monotone_constraints
self.interaction_constraints = interaction_constraints
self.importance_type = importance_type
self.device = device
self.validate_parameters = validate_parameters
self.enable_categorical = enable_categorical
self.feature_types = feature_types
self.max_cat_to_onehot = max_cat_to_onehot
self.max_cat_threshold = max_cat_threshold
self.multi_strategy = multi_strategy
self.eval_metric = eval_metric
self.early_stopping_rounds = early_stopping_rounds
self.callbacks = callbacks
self.compiled = False
if kwargs:
self.kwargs = kwargs
def __sklearn_is_fitted__(self):
return hasattr(self, "_Booster")
def get_booster(self):
"""Get the underlying xgboost Booster of this model. This will raise an
exception when fit was not called.
Returns
-------
booster : a xgboost booster of underlying model
"""
if not self.__sklearn_is_fitted__():
raise TypeError("need to call fit or load_model beforehand")
return self._Booster
    def get_params(self, deep=True):
        # copy so the kwargs update below doesn't mutate the instance __dict__
        params = self.__dict__.copy()
# if kwargs is a dict, update params accordingly
if hasattr(self, "kwargs") and isinstance(self.kwargs, dict):
params.update(self.kwargs)
# take random_state into account only if it's an integer
if isinstance(params["random_state"], int):
ivy.seed(seed_value=params["random_state"])
return params
def get_xgb_params(self):
"""Get xgboost specific parameters."""
params = self.get_params()
# Parameters that should not go into native learner.
wrapper_specific = {
"importance_type",
"kwargs",
"missing",
"n_estimators",
"use_label_encoder",
"enable_categorical",
"early_stopping_rounds",
"callbacks",
"feature_types",
}
filtered = {}
for k, v in params.items():
if k not in wrapper_specific and not callable(v):
filtered[k] = v
return filtered
def get_num_boosting_rounds(self):
"""Gets the number of xgboost boosting rounds."""
# 100 is the default number of boosting rounds
return 100 if not self.n_estimators else self.n_estimators
def compile(self, X, y):
# set compiled flag
self.compiled = True
# instantiate Booster and compile funcs involved in calculations for training
params = self.get_xgb_params()
self._Booster = Booster(params, cache=[X, y], compile=True)
def fit(
self,
X,
y,
*,
sample_weight=None,
base_margin=None,
eval_set=None,
eval_metric=None,
early_stopping_rounds=None,
verbose=True,
xgb_model=None,
sample_weight_eval_set=None,
base_margin_eval_set=None,
feature_weights=None,
callbacks=None,
):
"""Fit gradient boosting model.
Note that calling ``fit()`` multiple times will cause the model object to be
re-fit from scratch. To resume training from a previous checkpoint, explicitly
pass ``xgb_model`` argument.
Parameters
----------
X
Feature matrix.
When the ``tree_method`` is set to ``hist``, internally, the
`QuantileDMatrix` will be used instead of the `DMatrix`
            for conserving memory. However, this has performance implications when
            the device of the input data does not match the device used for
            training. For instance, if the input is a numpy array on CPU but
            ``cuda`` is used for training, the data is first processed on CPU and
            then transferred to GPU.
y
Labels.
sample_weight
instance weights.
base_margin
global bias for each instance.
eval_set
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_metric
str, list of str, or callable, optional(deprecated in fit method).
early_stopping_rounds
int(deprecated in fit method).
verbose
If `verbose` is True and an evaluation set is used, the evaluation metric
measured on the validation set is printed to stdout at each boosting stage.
If `verbose` is an integer, the evaluation metric is printed at each
`verbose` boosting stage. The last boosting stage / the boosting stage found
by using `early_stopping_rounds` is also printed.
xgb_model
file name of stored XGBoost model or 'Booster' instance XGBoost model to be
loaded before training (allows training continuation).
sample_weight_eval_set
A list of the form [L_1, L_2, ..., L_n], where each L_i is an array like
object storing instance weights for the i-th validation set.
base_margin_eval_set
A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
object storing base margin for the i-th validation set.
feature_weights
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown.
callbacks
(deprecated in fit method).
"""
# skip all the validation as we're interested in calculations for now
# ToDo: add handling for custom objective
if self.compiled:
for i in range(self.get_num_boosting_rounds()):
self._Booster.update(X, y, i)
else:
params = self.get_xgb_params()
self._Booster = train(params, X, y, self.get_num_boosting_rounds())
return self
def predict(
self,
X,
output_margin=False,
validate_features=True,
base_margin=None,
iteration_range=None,
):
"""
Parameters
----------
X
Data to predict with.
output_margin
Whether to output the raw untransformed margin value.
validate_features
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
base_margin
Margin added to prediction.
iteration_range
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying ``iteration_range=(10,
20)``, then only the forests built during [10, 20) (half open set) rounds
are used in this prediction.
Returns
-------
prediction
"""
# skip the validation, as for now we simply call the predict method of
# underlying booster
return self.get_booster().predict(
data=X,
iteration_range=iteration_range,
output_margin=output_margin,
validate_features=validate_features,
)
class XGBClassifier(XGBModel, XGBClassifierBase):
# as for now simply calls the init method of a parent class, because we implement a
# minimal subset of functionality
def __init__(self, *, objective="binary:logistic", **kwargs):
super().__init__(objective=objective, **kwargs)
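# Minimal usage sketch (assumptions: the frontend's train/Booster accept plain
# ivy arrays for X and y, mirroring xgboost's sklearn API; guarded and not
# verified here):
if __name__ == "__main__":
    ivy.set_backend("numpy")
    X = ivy.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
    y = ivy.array([[0.0], [1.0], [1.0], [0.0]])
    clf = XGBClassifier(n_estimators=2, learning_rate=0.3)
    clf.fit(X, y)
    print(clf.predict(X))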
# --- end of file: ivy/ivy/functional/frontends/xgboost/sklearn.py ---
# local
from typing import Optional, Union, Tuple, List, Sequence
from numbers import Number
import ivy
from ivy.func_wrapper import (
handle_out_argument,
to_native_arrays_and_back,
handle_nestable,
handle_partial_mixed_function,
handle_array_like_without_promotion,
inputs_to_ivy_arrays,
handle_array_function,
infer_dtype,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def amax(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the maximum value of the input array ``x``.
.. note::
        ``amax`` is an alias of ``max`` and both functions behave similarly
        in every backend except PyTorch and PaddlePaddle
        (see `PyTorch's amax documentation
        <https://pytorch.org/docs/stable/generated/torch.amax.html>`_ and
        `PaddlePaddle's amax documentation
        <https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/amax_cn.html>`_).
.. note::
When the number of elements over which to compute the maximum value is zero, the
maximum value is implementation-defined. Specification-compliant libraries may
choose to raise an error, return a sentinel value (e.g., if ``x`` is a
floating-point input array, return ``NaN``), or return the minimum possible
value for the input array ``x`` data type (e.g., if ``x`` is a floating-point
array, return ``-infinity``).
**Special Cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the maximum value is ``NaN``
(i.e., ``NaN`` values propagate).
Parameters
----------
x
input array. Should have a real-valued data type.
axis
axis or axes along which maximum values must be computed. By default, the
maximum value must be computed over the entire array. If a tuple of integers,
maximum values must be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions) must be included
in the result as singleton dimensions, and, accordingly, the result must be
        compatible with the input array (see `broadcasting
        <https://data-apis.org/array-api/latest/API_specification/broadcasting.html#broadcasting>`_).
Otherwise, if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the maximum value was computed over the entire array, a zero-dimensional
array containing the maximum value; otherwise, a non-zero-dimensional array
containing the maximum values. The returned array must have the same data type
as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.max.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.amax(x)
>>> print(y)
ivy.array(3)
>>> x = ivy.array([0, 1, 2])
>>> z = ivy.array([0, 0, 0])
>>> y = ivy.amax(x, out=z)
>>> print(z)
ivy.array(2)
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.amax(x, axis=0, keepdims=True)
>>> print(y)
ivy.array([[4, 6, 10]])
>>> x = ivy.native_array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.amax(x)
>>> print(y)
ivy.array(10)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([2, 3, 4]))
>>> y = ivy.amax(x)
>>> print(y)
{
a: ivy.array(3),
b: ivy.array(4)
}
"""
return ivy.current_backend(x).amax(x, axis=axis, keepdims=keepdims, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def amin(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate the minimum value of the input array ``x``.
.. note::
        ``amin`` is an alias of ``min`` and both functions behave similarly
        in every backend except PyTorch and PaddlePaddle
        (see `PyTorch's amin documentation
        <https://pytorch.org/docs/stable/generated/torch.amin.html>`_ and
        `PaddlePaddle's amin documentation
        <https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/amin_cn.html>`_).
.. note::
When the number of elements over which to compute the minimum value is zero, the
minimum value is implementation-defined. Specification-compliant libraries may
choose to raise an error, return a sentinel value (e.g., if ``x`` is a
floating-point input array, return ``NaN``), or return the maximum possible value
for the input array ``x`` data type (e.g., if ``x`` is a floating-point array,
return ``+infinity``).
**Special Cases**
For floating-point operands,
- If ``x_i`` is ``NaN``, the minimum value is ``NaN``
(i.e., ``NaN`` values propagate).
Parameters
----------
x
input array. Should have a real-valued data type.
axis
axis or axes along which minimum values must be computed. By default, the
minimum value must be computed over the entire array. If a tuple of integers,
minimum values must be computed over multiple axes. Default: ``None``.
keepdims
optional boolean, if ``True``, the reduced axes (dimensions) must be included
in the result as singleton dimensions, and, accordingly, the result must be
        compatible with the input array (see `broadcasting
        <https://data-apis.org/array-api/latest/API_specification/broadcasting.html#broadcasting>`_).
Otherwise, if ``False``, the reduced axes (dimensions)
must not be included in the result.
Default: ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
if the minimum value was computed over the entire array, a zero-dimensional
array containing the minimum value; otherwise, a non-zero-dimensional array
containing the minimum values. The returned array must have the same data type
as ``x``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.min.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.amin(x)
>>> print(y)
ivy.array(1)
>>> x = ivy.array([0, 1, 2])
>>> z = ivy.array([0, 0, 0])
>>> y = ivy.amin(x, out=z)
>>> print(z)
ivy.array(0)
>>> x = ivy.array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.amin(x, axis=0, keepdims=True)
>>> print(y)
ivy.array([[0, 1, 2]])
>>> x = ivy.native_array([[0, 1, 2], [4, 6, 10]])
>>> y = ivy.amin(x)
>>> print(y)
ivy.array(0)
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([2, 3, 4]))
>>> y = ivy.amin(x)
>>> print(y)
{
a: ivy.array(1),
b: ivy.array(2)
}
"""
return ivy.current_backend(x).amin(x, axis=axis, keepdims=keepdims, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
def lgamma(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the natural logarithm of the absolute value of the gamma
function on x.
Parameters
----------
x
input array. Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the natural log of Gamma(x) of each element in x.
The returned array must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.array([1.6, 2.6, 3.5])
>>> y = x.lgamma()
>>> print(y)
ivy.array([-0.11259177, 0.3574118 , 1.20097363])
>>> x = ivy.array([1., 2., 3. ])
>>> y = x.lgamma()
>>> print(y)
ivy.array([0. ,0. ,0.69314718])
>>> x = ivy.array([4.5, -4, -5.6])
    >>> x.lgamma(out=x)
>>> print(x)
ivy.array([2.45373654, inf, -4.6477685 ])
"""
return ivy.current_backend(x).lgamma(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def sinc(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Calculate an implementation-dependent approximation of the principal
value of the normalized sinc function, having domain ``(-infinity,
+infinity)`` and codomain ``[-0.217234, 1]``, for each element ``x_i`` of
the input array ``x``. Each element ``x_i`` is assumed to be expressed in
radians.
**Special cases**
For floating-point operands,
    - If ``x_i`` is ``NaN``, the result is ``NaN``.
- If ``x_i`` is ``0``, the result is ``1``.
- If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
Parameters
----------
x
input array. Should have a floating-point data type.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the normalized sinc function of each element in x.
The returned array must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.5, 1.5, 2.5, 3.5])
>>> y = x.sinc()
>>> print(y)
ivy.array([0.637,-0.212,0.127,-0.0909])
>>> x = ivy.array([1.5, 0.5, -1.5])
>>> y = ivy.zeros(3)
>>> ivy.sinc(x, out=y)
>>> print(y)
ivy.array([-0.212,0.637,-0.212])
With :class:`ivy.NativeArray` input:
>>> x = ivy.array([0.5, 1.5, 2.5, 3.5])
>>> y = ivy.sinc(x)
>>> print(y)
ivy.array([0.637,-0.212,0.127,-0.0909])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.5, 1.5, 2.5]),
... b=ivy.array([3.5, 4.5, 5.5]))
>>> y = x.sinc()
>>> print(y)
{
a: ivy.array([0.637,-0.212,0.127]),
b: ivy.array([-0.0909,0.0707,-0.0579])
}
"""
return ivy.current_backend(x).sinc(x, out=out)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def fmax(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Compute the element-wise maximums of two arrays. Differs from
ivy.maximum in the case where one of the elements is NaN. ivy.maximum
returns the NaN element while ivy.fmax returns the non-NaN element.
Parameters
----------
x1
First input array.
x2
Second input array.
out
optional output array, for writing the result to.
Returns
-------
ret
Array with element-wise maximums.
Examples
--------
>>> x1 = ivy.array([2, 3, 4])
>>> x2 = ivy.array([1, 5, 2])
>>> ivy.fmax(x1, x2)
ivy.array([ 2., 5., 4.])
>>> x1 = ivy.array([ivy.nan, 0, ivy.nan])
>>> x2 = ivy.array([0, ivy.nan, ivy.nan])
>>> ivy.fmax(x1, x2)
ivy.array([ 0., 0., nan])
"""
return ivy.current_backend().fmax(x1, x2, out=out)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def float_power(
x1: Union[ivy.Array, float, list, tuple],
x2: Union[ivy.Array, float, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Raise each base in x1 to the positionally-corresponding power in x2. x1
and x2 must be broadcastable to the same shape. This differs from the power
function in that integers, float16, and float32 are promoted to floats with
a minimum precision of float64 so that the result is always inexact.
Parameters
----------
x1
Array-like with elements to raise in power.
x2
Array-like of exponents. If x1.shape != x2.shape,
they must be broadcastable to a common shape
(which becomes the shape of the output).
out
optional output array, for writing the result to.
Returns
-------
ret
The bases in x1 raised to the exponents in x2.
        This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.array([1, 2, 3, 4, 5])
>>> ivy.float_power(x1, 3)
ivy.array([1., 8., 27., 64., 125.])
>>> x1 = ivy.array([1, 2, 3, 4, 5])
>>> x2 = ivy.array([2, 3, 3, 2, 1])
>>> ivy.float_power(x1, x2)
ivy.array([1., 8., 27., 16., 5.])
"""
return ivy.current_backend().float_power(x1, x2, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def copysign(
x1: Union[ivy.Array, ivy.NativeArray, Number],
x2: Union[ivy.Array, ivy.NativeArray, Number],
/,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Change the signs of x1 to match x2 x1 and x2 must be broadcastable to a
common shape.
Parameters
----------
x1
Array or scalar to change the sign of
x2
Array or scalar from which the new signs are applied
Unsigned zeroes are considered positive.
out
optional output array, for writing the result to.
Returns
-------
ret
x1 with the signs of x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = ivy.array([-1, 0, 23, 2])
>>> x2 = ivy.array([1, -1, -10, 44])
>>> ivy.copysign(x1, x2)
ivy.array([ 1., -0., -23., 2.])
>>> ivy.copysign(x1, -1)
ivy.array([ -1., -0., -23., -2.])
>>> ivy.copysign(-10, 1)
ivy.array(10.)
"""
return ivy.current_backend().copysign(x1, x2, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@infer_dtype
@handle_device
def count_nonzero(
a: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
"""Count the number of non-zero values in the array a.
Parameters
----------
a
array for which to count non-zeros.
axis
optional axis or tuple of axes along which to count non-zeros. Default is
None, meaning that non-zeros will be counted along a flattened
version of the input array.
keepdims
optional, if this is set to True, the axes that are counted are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
dtype
optional output dtype. Default is of type integer.
out
optional output array, for writing the result to.
Returns
-------
ret
        Number of non-zero values along the given axis. If no axis is given,
        the total number of non-zero values in the array is returned.
Examples
--------
>>> a = ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]])
>>> ivy.count_nonzero(a)
ivy.array(7)
>>> a = ivy.array([[0, 1, 2, 3],[4, 5, 6, 7]])
>>> ivy.count_nonzero(a, axis=0)
ivy.array([1, 2, 2, 2])
>>> a = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> ivy.count_nonzero(a, axis=(0,1), keepdims=True)
ivy.array([[[3, 4]]])
"""
return ivy.current_backend().count_nonzero(
a, axis=axis, keepdims=keepdims, dtype=dtype, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@infer_dtype
@handle_device
def nansum(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[Union[Tuple[int, ...], int]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
keepdims: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
Parameters
----------
x
Input array.
axis
Axis or axes along which the sum is computed.
The default is to compute the sum of the flattened array.
dtype
The type of the returned array and of the accumulator in
which the elements are summed. By default, the dtype of input is used.
keepdims
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
        A new array holding the result is returned unless out is specified,
        in which case it is returned.
Examples
--------
>>> a = ivy.array([[ 2.1, 3.4, ivy.nan], [ivy.nan, 2.4, 2.1]])
>>> ivy.nansum(a)
10.0
>>> ivy.nansum(a, axis=0)
ivy.array([2.1, 5.8, 2.1])
>>> ivy.nansum(a, axis=1)
ivy.array([5.5, 4.5])
"""
return ivy.current_backend().nansum(
x, axis=axis, dtype=dtype, keepdims=keepdims, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def isclose(
a: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers.
The relative difference (rtol * abs(b)) and the absolute difference
atol are added together to compare against the absolute difference
between a and b.
The default atol is not appropriate for comparing numbers that are
    much smaller than one.
Parameters
----------
a
First input array.
b
Second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in a will be
considered equal to NaN's in b in the output array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Returns a boolean array of where a and b are equal within the given
tolerance. If both a and b are scalars, returns a single boolean value.
Examples
--------
>>> ivy.isclose([1e10,1e-7], [1.00001e10,1e-8])
ivy.array([True, False])
>>> ivy.isclose([1.0, ivy.nan], [1.0, ivy.nan], equal_nan=True)
ivy.array([True, True])
>>> ivy.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
ivy.array([False, False])
>>> ivy.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], rtol=0.005, atol=0.0)
ivy.array([False, True])
"""
return ivy.current_backend().isclose(
a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def signbit(
x: Union[ivy.Array, ivy.NativeArray, float, int, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return element-wise True where signbit is set (less than zero).
Parameters
----------
x
Array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Output array, or reference to out if that was supplied.
This is a scalar if x is a scalar.
Examples
--------
>>> x = ivy.array([1, -2, 3])
>>> ivy.signbit(x)
ivy.array([False, True, False])
"""
return ivy.current_backend(x).signbit(x, out=out)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def hypot(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Return the hypotenuse given the two sides of a right angle triangle.
Parameters
----------
x1
The first input array
x2
The second input array
Returns
-------
ret
An array with the hypotenuse
Examples
--------
>>> a = ivy.array([3.0, 4.0, 5.0])
>>> b = ivy.array([4.0, 5.0, 6.0])
>>> ivy.hypot(a, b)
ivy.array([5.0, 6.4031, 7.8102])
"""
return ivy.current_backend(x1, x2).hypot(x1, x2, out=out)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def diff(
x: Union[ivy.Array, ivy.NativeArray, list, tuple],
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[ivy.Array, ivy.NativeArray, int, list, tuple]] = None,
append: Optional[Union[ivy.Array, ivy.NativeArray, int, list, tuple]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the n-th discrete difference along the given axis.
Parameters
----------
x
Array-like input.
n
The number of times values are differenced. If zero, the input is returned
as-is.
axis
The axis along which the difference is taken, default is the last axis.
    prepend, append
        Values to prepend/append to x along the given axis prior to performing the
        difference. Scalar values are expanded to arrays with length 1 in the direction
        of axis and the shape of the input array along all other axes. Otherwise the
        dimension and shape must match x except along axis.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns the n-th discrete difference along the given axis.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
>>> x = ivy.array([1, 2, 4, 7, 0])
>>> ivy.diff(x)
ivy.array([ 1, 2, 3, -7])
"""
return ivy.current_backend().diff(
x, n=n, axis=axis, prepend=prepend, append=append, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_device
def allclose(
a: Union[ivy.Array, ivy.NativeArray],
b: Union[ivy.Array, ivy.NativeArray],
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[ivy.Array] = None,
) -> bool:
"""Return a True if the two arrays are element-wise equal within given
tolerance; otherwise False.
The tolerance values are positive, typically very small numbers.
The relative difference (rtol * abs(x2)) and the absolute difference
atol are added together to compare against the absolute difference
between x1 and x2.
The default atol is not appropriate for comparing numbers that are
much smaller than one
Parameters
----------
x1
First input array.
x2
Second input array.
rtol
The relative tolerance parameter.
atol
The absolute tolerance parameter.
equal_nan
Whether to compare NaN's as equal. If True, NaN's in x1 will be
considered equal to NaN's in x2 in the output array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Returns True if the two arrays are equal within the given tolerance;
False otherwise.
Examples
--------
>>> x1 = ivy.array([1e10, 1e-7])
>>> x2 = ivy.array([1.00001e10, 1e-8])
>>> y = ivy.allclose(x1, x2)
>>> print(y)
ivy.array(False)
>>> x1 = ivy.array([1.0, ivy.nan])
>>> x2 = ivy.array([1.0, ivy.nan])
>>> y = ivy.allclose(x1, x2, equal_nan=True)
>>> print(y)
ivy.array(True)
>>> x1 = ivy.array([1e-10, 1e-10])
>>> x2 = ivy.array([1.00001e-10, 1e-10])
>>> y = ivy.allclose(x1, x2, rtol=0.005, atol=0.0)
>>> print(y)
ivy.array(True)
"""
return ivy.current_backend().allclose(
a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, out=out
)
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def fix(
x: Union[ivy.Array, ivy.NativeArray, float, int, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x
Array input.
out
optional output array, for writing the result to.
Returns
-------
ret
Array of floats with elements corresponding to input elements
rounded to nearest integer towards zero, element-wise.
Examples
--------
>>> x = ivy.array([2.1, 2.9, -2.1])
>>> ivy.fix(x)
ivy.array([ 2., 2., -2.])
"""
return ivy.current_backend(x).fix(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def nextafter(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1
First input array.
x2
Second input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
The next representable values of x1 in the direction of x2.
Examples
--------
>>> x1 = ivy.array([1.0e-50, 2.0e+50])
>>> x2 = ivy.array([2.0, 1.0])
>>> ivy.nextafter(x1, x2)
    ivy.array([1.4013e-45, 3.4028e+38])
"""
return ivy.current_backend(x1, x2).nextafter(x1, x2, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def zeta(
x: Union[ivy.Array, ivy.NativeArray],
q: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
    """Compute the Hurwitz zeta function element-wise for each pair of floats
    in the two arrays.
Parameters
----------
x
First input array.
q
Second input array, must have the same shape as the first input array
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Array with values computed from zeta function from
input arrays' values.
Examples
--------
>>> x = ivy.array([5.0, 3.0])
>>> q = ivy.array([2.0, 2.0])
>>> ivy.zeta(x, q)
ivy.array([0.0369, 0.2021])
"""
return ivy.current_backend(x, q).zeta(x, q, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_device
def gradient(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
spacing: Union[int, list, tuple] = 1,
edge_order: int = 1,
axis: Optional[Union[int, list, tuple]] = None,
) -> Union[ivy.Array, List[ivy.Array]]:
"""Calculate gradient of x with respect to (w.r.t.) spacing.
Parameters
----------
x
input array representing outcomes of the function
    spacing
        if not given, the indices of x are used
        if a scalar, the indices of x are scaled by this value
        if an array, the gradient of x w.r.t. the values in spacing
    edge_order
        1 or 2, for first-order and second-order estimation
        of the boundary values of the gradient respectively.
        Note: jax supports only the edge_order=1 case
axis
dimension(s) to approximate the gradient over
by default partial gradient is computed in every dimension
Returns
-------
ret
Array with values computed from gradient function from
inputs
Examples
--------
>>> spacing = (ivy.array([-2., -1., 1., 4.]),)
>>> x = ivy.array([4., 1., 1., 16.], )
>>> ivy.gradient(x, spacing=spacing)
ivy.array([-3., -2., 2., 5.])
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x)
[ivy.array([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]), ivy.array([[ 1. , 1.5, 3. , 4. ],
[10. , 15. , 30. , 40. ]])]
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, spacing=2.0)
[ivy.array([[ 4.5, 9. , 18. , 36. ],
[ 4.5, 9. , 18. , 36. ]]), ivy.array([[ 0.5 , 0.75, 1.5 , 2. ],
[ 5. , 7.5 , 15. , 20. ]])]
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, axis=1)
ivy.array([[ 1. , 1.5, 3. , 4. ],
[10. , 15. , 30. , 40. ]])
>>> x = ivy.array([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> ivy.gradient(x, spacing=[3., 2.])
[ivy.array([[ 3., 6., 12., 24.],
[ 3., 6., 12., 24.]]), ivy.array([[ 0.5 , 0.75, 1.5 , 2. ],
[ 5. , 7.5 , 15. , 20. ]])]
>>> spacing = (ivy.array([0, 2]), ivy.array([0, 3, 6, 9]))
>>> ivy.gradient(x, spacing=spacing)
[ivy.array([[ 4.5, 9. , 18. , 36. ],
[ 4.5, 9. , 18. , 36. ]]), ivy.array([[ 0.33333333, 0.5, 1., 1.33333333],
[ 3.33333333, 5. , 10. , 13.33333333]])]
"""
return ivy.current_backend(x).gradient(
x, spacing=spacing, edge_order=edge_order, axis=axis
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def xlogy(
x: Union[ivy.Array, ivy.NativeArray],
y: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute x*log(y) element-wise so that the result is 0 if x = 0.
Parameters
----------
x
First input array.
y
Second input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
    ret
        x multiplied by the natural logarithm of y, element-wise, with the
        result set to 0 wherever x is 0.
Examples
--------
>>> x = ivy.zeros(3)
>>> y = ivy.array([-1.0, 0.0, 1.0])
>>> ivy.xlogy(x, y)
ivy.array([0.0, 0.0, 0.0])
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([3.0, 2.0, 1.0])
>>> ivy.xlogy(x, y)
ivy.array([1.0986, 1.3863, 0.0000])
"""
return ivy.current_backend(x, y).xlogy(x, y, out=out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_ivy_arrays
def binarizer(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
threshold: float = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Map the values of the input tensor to either 0 or 1, element-wise, based
on the outcome of a comparison against a threshold value.
Parameters
----------
x
Data to be binarized
threshold
Values greater than this are
mapped to 1, others to 0.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Binarized output data
"""
xc = ivy.copy_array(x, out=out)
if ivy.is_bool_dtype(xc) and ivy.current_backend_str() == "torch":
xc = ivy.astype(xc, ivy.default_float_dtype())
if ivy.is_complex_dtype(xc):
xc = ivy.abs(xc)
ret = ivy.where(xc > threshold, 1, 0)
return ret
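# Doctest-style sketch for binarizer (hedged; mirrors the Examples sections
# used elsewhere in this module):
#   >>> x = ivy.array([0.3, 1.5, -0.1])
#   >>> ivy.binarizer(x, threshold=0.5)
#   ivy.array([0, 1, 0])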
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def conj(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the complex conjugate for each element ``x_i`` of the input array
``x``.
    For a complex number of the form
.. math::
a + bj
the complex conjugate is defined as
.. math::
a - bj
    Hence, the returned conjugates must be computed by negating
    the imaginary component of each element ``x_i``.
This method conforms to the
`Array API Standard <https://data-apis.org/array-api/latest/>`_.
This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.conj.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Parameters
----------
x
input array.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array of the same dtype as the input array with
the complex conjugates of the complex values present
in the input array. If x is a scalar then a scalar
will be returned.
The descriptions above assume an array input for simplicity, but
the method also accepts :class:`ivy.Container` instances
    in place of :class:`ivy.Array` or :class:`ivy.NativeArray`
instances, as shown in the type hints and also the examples below.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([4.2-0j, 3j, 7+5j])
>>> z = ivy.conj(x)
>>> print(z)
ivy.array([4.2-0.j, 0. -3.j, 7. -5.j])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-6.7-7j, 0.314+0.355j, 1.23]),
... b=ivy.array([5j, 5.32-6.55j, 3.001]))
>>> z = ivy.conj(x)
>>> print(z)
{
a: ivy.array([-6.7+7.j, 0.314-0.355j, 1.23-0.j]),
b: ivy.array([0.-5.j, 5.32+6.55j, 3.001-0.j])
}
"""
return ivy.current_backend(x).conj(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def ldexp(
x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return x1 * (2**x2), element-wise.
Parameters
----------
x1
Input array.
x2
Input array.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
        The result of x1 * (2**x2), element-wise.
Examples
--------
>>> x1 = ivy.array([1, 2, 3])
>>> x2 = ivy.array([0, 1, 2])
>>> ivy.ldexp(x1, x2)
ivy.array([1, 4, 12])
"""
return ivy.current_backend(x1, x2).ldexp(x1, x2, out=out)
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
@handle_device
def lerp(
input: Union[ivy.Array, ivy.NativeArray],
end: Union[ivy.Array, ivy.NativeArray],
weight: Union[ivy.Array, ivy.NativeArray, float],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a linear interpolation of two arrays start (given by input) and
end.
based on a scalar or array weight.
input + weight * (end - input), element-wise.
Parameters
----------
input
array of starting points
end
array of ending points
weight
the weight for the interpolation formula. Scalar or Array.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
The result of input + ((end - input) * weight)
Examples
--------
With :class:`ivy.Array` inputs:
>>> input = ivy.array([1, 2, 3])
>>> end = ivy.array([10, 10, 10])
>>> weight = 0.5
>>> y = ivy.lerp(input, end, weight)
>>> print(y)
ivy.array([5.5, 6. , 6.5])
>>> input = ivy.array([1.1, 1.2, 1.3])
>>> end = ivy.array([20])
>>> weight = ivy.array([0.4, 0.5, 0.6])
>>> y = ivy.zeros(3)
>>> ivy.lerp(input, end, weight, out=y)
>>> print(y)
ivy.array([ 8.65999985, 10.60000038, 12.52000046])
>>> input = ivy.array([[4, 5, 6],[4.1, 4.2, 4.3]])
>>> end = ivy.array([10])
>>> weight = ivy.array([0.5])
>>> ivy.lerp(input, end, weight, out=input)
>>> print(input)
ivy.array([[7. , 7.5 , 8. ],
[7.05000019, 7.0999999 , 7.1500001 ]])
With :class:`ivy.Container` input:
>>> input = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> end = ivy.array([10.])
>>> weight = 1.1
>>> y = input.lerp(end, weight)
>>> print(y)
{
a: ivy.array([11., 10.90000057, 10.80000019]),
b: ivy.array([10.70000076, 10.60000038, 10.5])
}
>>> input = ivy.Container(a=ivy.array([10.1, 11.1]), b=ivy.array([10, 11]))
>>> end = ivy.Container(a=ivy.array([5]), b=ivy.array([0]))
>>> weight = 0.5
>>> y = input.lerp(end, weight)
>>> print(y)
{
a: ivy.array([7.55000019, 8.05000019]),
b: ivy.array([5., 5.5])
}
"""
input_end_allowed_types = [
"int8",
"int16",
"int32",
"int64",
"float16",
"bfloat16",
"float32",
"float64",
"complex",
]
weight_allowed_types = ["float16", "bfloat16", "float32", "float64"]
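    # inputs (and any array weight) with unsupported dtypes are promoted to
    # float64 below, so the interpolation is computed in a floating dtype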
if not ivy.is_array(input):
input = ivy.array([input])
if not ivy.is_array(end):
end = ivy.array([end])
if (
ivy.dtype(input) not in input_end_allowed_types
or ivy.dtype(end) not in input_end_allowed_types
):
input = ivy.astype(input, "float64")
end = ivy.astype(end, "float64")
if ivy.is_array(weight):
if ivy.dtype(weight) not in weight_allowed_types:
weight = ivy.astype(weight, "float64")
elif not isinstance(weight, float):
weight = ivy.astype(ivy.array([weight]), "float64")
return ivy.add(input, ivy.multiply(weight, ivy.subtract(end, input)), out=out)
lerp.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def frexp(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[Tuple[ivy.Array, ivy.Array]] = None,
) -> Tuple[ivy.Array, ivy.Array]:
"""Decompose the elements of x into mantissa and twos exponent.
Parameters
----------
x
Input array.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A tuple of two arrays, the mantissa and the twos exponent.
Examples
--------
>>> x = ivy.array([1, 2, 3])
>>> ivy.frexp(x)
(ivy.array([0.5, 0.5, 0.75]), ivy.array([1, 2, 2]))
"""
return ivy.current_backend(x).frexp(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
def modf(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[Tuple[ivy.Array, ivy.Array]] = None,
) -> Tuple[ivy.Array, ivy.Array]:
"""Decompose the elements of x into fractional and integral parts.
Parameters
----------
x
Input array.
out
Optional output array for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A tuple of two arrays, the fractional and integral parts.
Examples
--------
>>> x = ivy.array([1.5, 2.7, 3.9])
>>> ivy.modf(x)
    (ivy.array([0.5, 0.7, 0.9]), ivy.array([1., 2., 3.]))
"""
return ivy.current_backend(x).modf(x, out=out)
@handle_exceptions
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def digamma(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the logarithmic derivative of the gamma function at x.
Note
----
The Ivy version only accepts real-valued inputs.
Parameters
----------
x
Input array.
out
Alternate output array in which to place the result.
The default is None.
Returns
-------
ret
Array with values computed from digamma function from
input arrays' values, element-wise.
Examples
--------
>>> x = ivy.array([.9, 3, 3.2])
    >>> y = ivy.digamma(x)
    >>> print(y)
    ivy.array([-0.7549271, 0.92278427, 0.9988394])
"""
return ivy.current_backend(x).digamma(x, out=out)
@handle_exceptions
@handle_nestable
@inputs_to_ivy_arrays
@handle_array_function
def sparsify_tensor(
x: Union[ivy.Array, ivy.NativeArray],
card: int,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Zeros out all elements in the tensor except `card` elements with maximum
absolute values.
Parameters
----------
x
Tensor to be sparsified
card
Desired number of non-zero elements in the tensor
out
Optional output array for writing the result to.
Returns
-------
    ret
        Tensor of the same shape as ``x``, with all but the ``card``
        largest-magnitude elements set to zero.
Examples
--------
>>> x = ivy.arange(100)
>>> x = ivy.reshape(x, (10, 10))
>>> sparsify_tensor(x, 10)
ivy.array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
"""
if card >= ivy.prod(ivy.array(x.shape)):
return ivy.inplace_update(out, x) if ivy.exists(out) else x
_shape = ivy.shape(x)
x = ivy.reshape(ivy.sort(ivy.abs(x)), (-1,))
tensor = ivy.concat([ivy.zeros(len(x) - card, dtype=x.dtype), x[-card:]], axis=0)
return ivy.reshape(tensor, _shape, out=out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def erfc(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""
Complementary error function, 1 - erf(x)
Parameters
----------
x
Input array of real or complex valued argument.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Values of the complementary error function.
Examples
--------
>>> x = ivy.array([2, -1., 0])
>>> ivy.erfc(x)
ivy.array([0.00467773, 1.84270084, 1. ])
"""
return ivy.current_backend(x).erfc(x, out=out)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def erfinv(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the inverse error function.
Parameters
----------
x
Input array of real or complex valued argument.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Values of the inverse error function.
Examples
--------
>>> x = ivy.array([0, 0.5, -1.])
>>> ivy.erfinv(x)
ivy.array([0.0000, 0.4769, -inf])
"""
return ivy.current_backend(x).erfinv(x, out=out)
| ivy/ivy/functional/ivy/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 19863
} | 41 |
# global
from typing import Optional
# local
import ivy
from ivy import handle_out_argument, handle_nestable
from ivy.utils.exceptions import handle_exceptions
@handle_out_argument
@handle_nestable
@handle_exceptions
def optional_get_element(
x: Optional[ivy.Array] = None,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""If the input is a tensor or sequence type, it returns the input. If the
input is an optional type, it outputs the element in the input. It is an
error if the input is an empty optional-type (i.e. does not have an
element) and the behavior is undefined in this case.
Parameters
----------
x
Input array
out
Optional output array, for writing the result to.
Returns
-------
ret
Input array if it is not None
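    Examples
    --------
    >>> x = ivy.array([1., 2., 3.])
    >>> y = ivy.optional_get_element(x)
    >>> print(y)
    ivy.array([1., 2., 3.])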
"""
if x is None:
raise ivy.utils.exceptions.IvyError(
"The requested optional input has no value."
)
return x
| ivy/ivy/functional/ivy/experimental/utility.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/utility.py",
"repo_id": "ivy",
"token_count": 359
} | 42 |
from . import activations
from .activations import *
from . import converters
from .converters import *
from . import initializers
from .initializers import *
from . import layers
from .layers import *
from . import losses
from .losses import *
from . import module
from .module import *
from . import norms
from .norms import *
from . import optimizers
from .optimizers import *
from . import sequential
from .sequential import *
| ivy/ivy/stateful/__init__.py/0 | {
"file_path": "ivy/ivy/stateful/__init__.py",
"repo_id": "ivy",
"token_count": 112
} | 43 |
# global
import os
import copy
import types
import ivy
import importlib
import functools
import numpy as np
import gc
from ivy.utils import _importlib, verbosity
# local
from ivy.func_wrapper import _wrap_function
from ivy.utils.backend.sub_backend_handler import (
_clear_current_sub_backends,
fn_name_from_version_specific_fn_name,
)
from ivy.utils.exceptions import _handle_inplace_mode
backend_stack = []
compiled_backends = {}
_compiled_backends_ids = {}
implicit_backend = "numpy"
ivy_original_dict = ivy.__dict__.copy()
ivy_original_fn_dict = {}
class ContextManager:
def __init__(self, module):
self.module = module
def __enter__(self):
return set_backend(self.module)
def __exit__(self, exc_type, exc_val, exc_tb):
previous_backend()
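# A minimal usage sketch of ``ContextManager`` (illustrative only; the names
# below are placeholders): entering the block sets the requested backend and
# yields the backend-set ivy namespace, and exiting restores the previous one.
#
#     with ContextManager("numpy") as ivy_api:
#         x = ivy_api.native_array([1.0, 2.0])  # numpy ndarray under the hood
#     # the previously set backend (if any) is active again here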
_backends_subpackage_path = "ivy.functional.backends"
_backend_dict = {}
_backend_reverse_dict = {}
for backend in os.listdir(
os.path.join(
ivy.__path__[0].rpartition(os.path.sep)[0], # type: ignore
_backends_subpackage_path.replace(".", os.path.sep),
)
):
if backend.startswith("__"):
continue
backend_path = f"{_backends_subpackage_path}.{backend}"
_backend_dict[backend] = backend_path
_backend_reverse_dict[backend_path] = backend
# Backend Getting/Setting #
# ----------------------- #
def prevent_access_locally(fn):
@functools.wraps(fn)
def _prevent_access_locally(*args, **kwargs):
if ivy.is_local():
raise RuntimeError(f"Calling {fn.__name__} is not allowed on this object.")
return fn(*args, **kwargs)
return _prevent_access_locally
@functools.lru_cache
def _get_backend_for_arg(arg_module_name):
for backend, module_name in _backend_dict.items():
if backend in arg_module_name:
return importlib.import_module(module_name)
def _determine_backend_from_args(args):
"""Return the appropriate Ivy backend, given some arguments.
Parameters
----------
args
the arguments from which to figure out the corresponding Ivy backend.
Returns
-------
ret
the Ivy backend inferred from `args`.
Examples
--------
If `args` is a jax.numpy array, then Ivy's jax backend will be returned:
>>> from ivy.utils.backend.handler import _determine_backend_from_args
>>> import jax.numpy as jnp
>>> x = jnp.array([1])
>>> print(_determine_backend_from_args(x))
<module 'ivy.functional.backends.jax' from '/ivy/ivy/functional/backends/jax/__init__.py'> # noqa
""" # noqa: E501
arg_type = type(args)
if isinstance(args, ivy.Array):
args = args.data
if isinstance(args, dict):
for value in args.values():
# recursively call the function for each value in the dictionary
lib = _determine_backend_from_args(value)
if lib:
return lib
# check if args is a list or tuple
elif arg_type in [list, tuple]:
for arg in args:
# recursively call the function for each element in the list/tuple
lib = _determine_backend_from_args(arg)
if lib:
return lib
else:
# check if the class module of the arg is in _array_types
return _get_backend_for_arg(args.__class__.__module__)
def set_backend_to_specific_version(backend):
"""Update the backend dict to make the original function name point to the
version specific one.
Parameters
----------
backend
the backend module for which we provide the version support
"""
# TODO: add functionality and tests
f = str(backend.__name__)
f = f[f.index("backends") + 9 :]
f = importlib.import_module(f)
f_version = f.__version__
for key in list(backend.__dict__):
if "_v_" in key:
orig_name = fn_name_from_version_specific_fn_name(key, f_version)
if orig_name:
backend.__dict__[orig_name] = backend.__dict__[key]
backend.__dict__[orig_name].__name__ = orig_name
def current_backend(*args, **kwargs):
"""Return the current backend. Priorities: global_backend > argument's
backend.
Parameters
----------
*args/**kwargs
the arguments from which to try to infer the backend, when there is
no globally set backend.
Returns
-------
ret
Ivy's current backend.
Examples
--------
If no global backend is set, then the backend is inferred from the arguments:
>>> import numpy as np
>>> x = np.array([2.0])
>>> print(ivy.current_backend(x))
<module 'ivy.functional.backends.numpy' from '/ivy/ivy/functional/backends/numpy/__init__.py'> # noqa
The global backend set in set_backend has priority over any arguments
passed to current_backend:
>>> import numpy as np
>>> ivy.set_backend("jax")
>>> x = np.array([2.0])
>>> print(ivy.current_backend(x))
<module 'ivy.functional.backends.jax' from '/ivy/ivy/functional/backends/jax/__init__.py'> # noqa
""" # noqa: E501
global implicit_backend
# if a global backend has been set with
# set_backend then this will be returned
if backend_stack:
f = backend_stack[-1]
if verbosity.level > 0:
verbosity.cprint(f"Using backend from stack: {f}")
return f
# if no global backend exists, we try to infer
# the backend from the arguments
f = _determine_backend_from_args(list(args) + list(kwargs.values()))
if f is not None:
if verbosity.level > 0:
verbosity.cprint(f"Using backend from type: {f}")
implicit_backend = f.current_backend_str()
return f
return importlib.import_module(_backend_dict[implicit_backend])
def _set_module_backend(
original_dict, target, backend, invalid_dtypes=None, backend_str=None
):
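    """Monkey-patch ``target`` with wrapped functions from ``backend``.
    Backend implementations take priority; names missing from the backend
    keep their compositional implementation, except dtype attributes listed
    in ``invalid_dtypes``, which are removed. Submodules under
    ``ivy.functional`` are patched recursively.
    """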
invalid_dtypes = (
backend.invalid_dtypes if invalid_dtypes is None else invalid_dtypes
)
backend_str = backend.current_backend_str() if backend_str is None else backend_str
for k, v in original_dict.items():
if k in ivy.GLOBAL_PROPS:
continue
compositional = k not in backend.__dict__
if compositional:
if k in invalid_dtypes and k in target.__dict__:
del target.__dict__[k]
continue
backend.__dict__[k] = v
target.__dict__[k] = _wrap_function(
key=k, to_wrap=backend.__dict__[k], original=v, compositional=compositional
)
if (
isinstance(v, types.ModuleType)
and "ivy.functional." in v.__name__
and os.path.join("{}", "__init__.py").format(backend_str) not in v.__file__
):
_set_module_backend(
v.__dict__,
target.__dict__[k],
backend.__dict__[k],
invalid_dtypes=invalid_dtypes,
backend_str=backend_str,
)
def _handle_backend_specific_vars(target, backend):
if backend.current_backend_str() == "numpy":
target.set_default_device("cpu")
elif backend.current_backend_str() == "jax":
target.set_global_attr("RNG", target.functional.backends.jax.random.RNG)
def _data_to_new_backend(x, previous_backend):
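    """Move ``x``'s data from ``previous_backend`` to the current backend,
    using DLPack when supported and a NumPy round-trip otherwise, while
    preserving the original device."""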
device = previous_backend.dev(x.data)
try:
result = ivy.from_dlpack(previous_backend.to_dlpack(x.data))
result = ivy.to_device(result, device)
except Exception:
np_res = previous_backend.to_numpy(x.data)
result = ivy.asarray(np_res, device=device)
return result
def dynamic_backend_converter(backend_stack):
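    """Convert all live ``ivy.Array`` and ``ivy.Container`` instances (found
    via the garbage collector) whose ``dynamic_backend`` flag is set to the
    newly set backend, preserving device placement and gradient-variable
    status."""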
from ivy.functional.ivy.gradients import _variable
def _is_var(obj, backend):
if isinstance(obj, ivy.Container):
def _map_fn(x):
x = x.data if isinstance(x, ivy.Array) else x
if x.__class__.__module__ in (
"numpy",
"jax.interpreters.xla",
"jaxlib.xla_extension",
):
return False
return backend.gradients._is_variable(x)
return obj.cont_map(lambda x, kc: _map_fn(x)).cont_all_true()
else:
obj = obj.data if isinstance(obj, ivy.Array) else obj
if obj.__class__.__module__ in (
"numpy",
"jax.interpreters.xla",
"jaxlib.xla_extension",
):
return False
return backend.gradients._is_variable(obj)
# get all ivy array instances in the project scope
container_list = [
obj
for obj in gc.get_objects()
if "ivy" in type(obj).__module__ and isinstance(obj, ivy.Container)
]
cont_array_idxs = ivy.nested_argwhere(
container_list,
lambda x: isinstance(x, ivy.Array) and x.backend != ivy.current_backend_str(),
)
cont_array_vals = ivy.multi_index_nest(container_list, cont_array_idxs)
array_list = [
obj
for obj in gc.get_objects()
if "ivy" in type(obj).__module__ and isinstance(obj, ivy.Array)
]
array_list.extend(cont_array_vals)
# filter uninitialized arrays and arrays with other backends, and ensure the order
array_list = [
arr
for arr in array_list
if arr.__dict__ and arr.backend != ivy.current_backend_str()
]
new_objs = [obj for obj in array_list if obj.dynamic_backend]
# now convert all ivy.Array and ivy.Container instances
# to the new backend
for obj in new_objs:
# the following if condition avoids converting arrays that were already
# updated inplace i.e. are references to other arrays
if obj.backend != ivy.current_backend_str():
backend = ivy.with_backend(obj.backend, cached=True)
if _is_var(obj, backend):
native_var = backend.gradients._variable_data(obj)
data = _data_to_new_backend(native_var, backend)
new_data = _variable(data)
else:
new_data = _data_to_new_backend(obj, backend)
if isinstance(obj, ivy.Container):
obj.cont_inplace_update(new_data)
else:
obj.data = new_data.data
@prevent_access_locally
def set_backend(backend: str, dynamic: bool = False):
"""Set `backend` to be the global backend.
    Will also convert all Array and Container objects to the new backend if
    ``dynamic=True``.
Examples
--------
If we set the global backend to be numpy, then subsequent calls to ivy functions
will be called from Ivy's numpy backend:
>>> ivy.set_backend("numpy")
>>> native = ivy.native_array([1])
>>> print(type(native))
<class 'numpy.ndarray'>
Or with jax as the global backend:
>>> ivy.set_backend("jax")
>>> native = ivy.native_array([1])
>>> print(type(native))
<class 'jaxlib.xla_extension.ArrayImpl'>
""" # noqa
ivy.utils.assertions.check_false(
isinstance(backend, str) and backend not in _backend_dict,
f"backend must be one from {list(_backend_dict.keys())}",
)
# update the global dict with the new backend
with ivy.locks["backend_setter"]:
global ivy_original_dict
if not backend_stack:
ivy_original_dict = ivy.__dict__.copy()
_clear_current_sub_backends()
if isinstance(backend, str):
temp_stack = []
while backend_stack:
temp_stack.append(previous_backend())
backend = importlib.import_module(_backend_dict[backend])
for fw in reversed(temp_stack):
backend_stack.append(fw)
if backend.current_backend_str() == "numpy":
ivy.set_default_device("cpu")
elif backend.current_backend_str() == "jax":
ivy.set_global_attr("RNG", ivy.functional.backends.jax.random.RNG)
backend_stack.append(backend)
set_backend_to_specific_version(backend)
_set_module_backend(ivy_original_dict, ivy, backend)
# following snippet is required to update the ivy.functional namespace with
# backend-specific functions
for key in ivy.__dict__.keys():
if key in ivy.functional.__dict__ and not key.startswith("__"):
ivy.functional.__dict__[key] = ivy.__dict__[key]
if dynamic:
dynamic_backend_converter(backend_stack)
for sub_backend in ivy.available_sub_backends:
ivy.set_sub_backend(sub_backend)
if verbosity.level > 0:
verbosity.cprint(f"backend stack: {backend_stack}")
_handle_inplace_mode()
return ivy
def set_numpy_backend():
"""Set NumPy to be the global backend.
equivalent to `ivy.set_backend("numpy")`.
""" # noqa
set_backend("numpy")
def set_jax_backend():
"""Set JAX to be the global backend.
equivalent to `ivy.set_backend("jax")`.
""" # noqa
set_backend("jax")
def set_tensorflow_backend():
"""Set TensorFlow to be the global backend.
equivalent to `ivy.set_backend("tensorflow")`.
"""
set_backend("tensorflow")
def set_torch_backend():
"""Set torch to be the global backend.
equivalent to `ivy.set_backend("torch")`.
""" # noqa
set_backend("torch")
def set_paddle_backend():
"""Set paddle to be the global backend.
equivalent to `ivy.set_backend("paddle")`.
""" # noqa
set_backend("paddle")
def set_mxnet_backend():
"""Set MXNet to be the global backend.
    equivalent to `ivy.set_backend("mxnet")`.
""" # noqa
set_backend("mxnet")
@prevent_access_locally
def previous_backend():
"""Unset the current global backend, and adjusts the ivy dict such that
either a previously set global backend is then used as the backend,
otherwise we return to Ivy's implementations.
Returns
-------
ret
the backend that was unset, or None if there was no set global backend.
Examples
--------
Torch is the last set backend hence is the backend used in the first examples.
However, as seen in the example after, if `previous_backend` is called before
`ivy.native_array` then tensorflow will become the current backend and any
torch backend implementations in the Ivy dict will be swapped with the
tensorflow implementation::
>>> ivy.set_backend("tensorflow")
>>> ivy.set_backend("torch")
>>> x = ivy.native_array([1])
>>> print(type(x))
<class 'torch.Tensor'>
>>> ivy.set_backend("tensorflow")
>>> ivy.set_backend("torch")
>>> ivy.previous_backend()
>>> x = ivy.native_array([1])
>>> print(type(x))
    <class 'tensorflow.python.framework.ops.EagerTensor'>
""" # noqa
backend = None
    # if the backend stack is empty, nothing is done and we just return `None`
if backend_stack:
backend = backend_stack.pop(-1) # remove last backend from the stack
if backend.current_backend_str() == "numpy":
ivy.unset_default_device()
elif backend.current_backend_str() == "jax":
ivy.del_global_attr("RNG")
# the new backend is the backend that was set before the one
# we just removed from the stack, or Ivy if there was no
# previously set backend
if backend_stack:
new_backend = backend_stack[-1]
if new_backend.current_backend_str() == "numpy":
ivy.set_default_device("cpu")
elif new_backend.current_backend_str() == "jax":
ivy.set_global_attr("RNG", ivy.functional.backends.jax.random.RNG)
new_backend_dict = (
backend_stack[-1].__dict__ if backend_stack else ivy_original_dict
)
# wrap backend functions if there still is a backend, and add functions
# to ivy namespace
for k, v in new_backend_dict.items():
if k in ivy.GLOBAL_PROPS:
continue
if backend_stack and k in ivy_original_dict:
v = _wrap_function(k, v, ivy_original_dict[k])
if k in ivy_original_dict:
ivy.__dict__[k] = v
if k in ivy.functional.__dict__ and not k.startswith("__"):
ivy.functional.__dict__[k] = v
if verbosity.level > 0:
verbosity.cprint(f"backend stack: {backend_stack}")
_handle_inplace_mode()
return backend
@prevent_access_locally
def unset_backend():
while backend_stack:
previous_backend()
@prevent_access_locally
def choose_random_backend(excluded=None):
excluded = [] if excluded is None else excluded
while True:
ivy.utils.assertions.check_equal(
len(excluded),
            len(_backend_dict),
inverse=True,
message="""Unable to select backend, all backends are excluded,\
or not installed.""",
as_array=False,
)
f = np.random.choice(
[f_srt for f_srt in list(_backend_dict.keys()) if f_srt not in excluded]
)
        print(f"\nselected backend: {f}\n")
        return f
# noinspection PyProtectedMember
@prevent_access_locally
def with_backend(backend: str, cached: bool = True):
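    """Return a compiled, standalone copy of the ivy namespace with `backend`
    set, leaving the global backend stack untouched.
    When `cached` is True, a previously compiled copy for `backend` is reused
    if one exists.
    Examples
    --------
    >>> ivy_np = ivy.with_backend("numpy")
    >>> type(ivy_np.native_array([1.]))
    <class 'numpy.ndarray'>
    """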
# Use already compiled object
if cached and backend in compiled_backends:
cached_backend = compiled_backends[backend][-1]
return cached_backend
with _importlib.LocalIvyImporter():
ivy_pack = _importlib._import_module("ivy")
ivy_pack._is_local_pkg = True
ivy_pack._compiled_id = id(ivy_pack)
backend_module = _importlib._import_module(
ivy_pack.utils.backend.handler._backend_dict[backend], ivy_pack.__package__
)
_handle_backend_specific_vars(ivy_pack, backend_module)
set_backend_to_specific_version(backend_module)
# We know for sure that the backend stack is empty
# no need to do backend unsetting
ivy_pack.utils.backend.handler._set_module_backend(
ivy_pack.__dict__.copy(), ivy_pack, backend_module
)
# TODO use a refactored code from ivy.set_backend
for key in ivy_pack.__dict__.keys():
if key in ivy_pack.functional.__dict__ and not key.startswith("__"):
ivy_pack.functional.__dict__[key] = ivy_pack.ivy.__dict__[key]
ivy_pack.backend_stack.append(backend_module)
ivy_pack.utils.backend._importlib.import_cache = copy.copy(
_importlib.import_cache
)
_compiled_backends_ids[ivy_pack._compiled_id] = ivy_pack
_importlib._clear_cache()
try:
compiled_backends[backend].append(ivy_pack)
except KeyError:
compiled_backends[backend] = [ivy_pack]
if ivy.backend != backend:
# to avoid warning users when not using set_backend with ivy.Array.__repr__
_handle_inplace_mode(ivy_pack=ivy_pack)
return ivy_pack
| ivy/ivy/utils/backend/handler.py/0 | {
"file_path": "ivy/ivy/utils/backend/handler.py",
"repo_id": "ivy",
"token_count": 8294
} | 44 |
from pytest import mark
from pathlib import Path
skip_ids = []
skips_path = Path(__file__).parent / "skips.txt"
if skips_path.exists():
with open(skips_path) as f:
for line in f:
if line.startswith("ivy_tests"):
id_ = line.strip("\n")
skip_ids.append(id_)
def pytest_collection_modifyitems(items):
skip_ivy = mark.skip(reason="ivy skip - see ivy_tests/skips.txt for details")
for item in items:
# skip if specified in skips.txt
for id_ in skip_ids:
if item.nodeid.startswith(id_):
item.add_marker(skip_ivy)
break
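# The expected ``skips.txt`` format, as implied by the parsing above: one test
# id per line, each starting with "ivy_tests", e.g. (hypothetical entry):
#     ivy_tests/array_api_testing/test_array_api/array_api_tests/test_special_cases.py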
| ivy/ivy_tests/array_api_testing/conftest.py/0 | {
"file_path": "ivy/ivy_tests/array_api_testing/conftest.py",
"repo_id": "ivy",
"token_count": 311
} | 45 |
from hypothesis import strategies as st
from hypothesis.internal.floats import float_of
# local
from . import general_helpers as gh, dtype_helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
floats_info = {
"float16": {"cast_type": "float16", "width": 16},
"bfloat16": {"cast_type": "float32", "width": 32},
"float32": {"cast_type": "float32", "width": 32},
"float64": {"cast_type": "float64", "width": 64},
}
@st.composite
def floats(
draw,
*,
min_value=None,
max_value=None,
abs_smallest_val=None,
allow_nan=False,
allow_inf=False,
allow_subnormal=False,
exclude_min=True,
exclude_max=True,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
mixed_fn_compos=True,
):
"""Draws an arbitrarily sized list of floats with a safety factor applied
to avoid values being generated at the edge of a dtype limit.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
min_value
minimum value of floats generated.
max_value
maximum value of floats generated.
abs_smallest_val
the absolute smallest representable value of the data type.
    allow_nan
        if True, allow NaN values to be drawn.
    allow_inf
        if True, allow infinite values to be drawn.
    allow_subnormal
        if True, allow subnormal values to be drawn.
exclude_min
if True, exclude the minimum limit.
exclude_max
if True, exclude the maximum limit.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use for the safety factor scaling. Can be "linear" or "log".
Default value = "linear".
mixed_fn_compos
boolean if True, the function will generate using the float dtypes
of the compositional implementation for mixed partial functions and
if False, it will generate using the float dtypes of the
primary implementation.
Returns
-------
ret
A strategy that draws floats.
"""
    # ToDo: assert that min and max can be represented
dtype = draw(
dtype_helpers.get_dtypes(
"float", mixed_fn_compos=mixed_fn_compos, full=False, prune_function=False
)
)
dtype = dtype[0]
# ToDo add support for not applying safety factor
min_value, max_value, abs_smallest_val = gh.apply_safety_factor(
dtype,
backend=test_globals.CURRENT_BACKEND,
min_value=min_value,
max_value=max_value,
abs_smallest_val=abs_smallest_val,
small_abs_safety_factor=small_abs_safety_factor,
large_abs_safety_factor=large_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
# The smallest possible value is determined by one of the arguments
if min_value > -abs_smallest_val or max_value < abs_smallest_val:
float_strategy = st.floats(
min_value=float_of(min_value, floats_info[dtype]["width"]),
max_value=float_of(max_value, floats_info[dtype]["width"]),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
)
else:
float_strategy = st.one_of(
st.floats(
min_value=float_of(min_value, floats_info[dtype]["width"]),
max_value=float_of(-abs_smallest_val, floats_info[dtype]["width"]),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
),
st.floats(
min_value=float_of(abs_smallest_val, floats_info[dtype]["width"]),
max_value=float_of(max_value, floats_info[dtype]["width"]),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
),
)
values = draw(float_strategy)
return values
@st.composite
def ints(
draw,
*,
min_value=None,
max_value=None,
safety_factor=1.1,
safety_factor_scale=None,
mixed_fn_compos=True,
):
"""Draws an integer with a safety factor if specified.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
min_value
minimum value of integers generated.
max_value
maximum value of integers generated.
safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
safety_factor_scale
The operation to use for the safety factor scaling. Can be "linear" or "log".
Default value = "linear".
mixed_fn_compos
boolean if True, the function will generate using the integer dtypes
of the compositional implementation for mixed partial functions and
if False, it will generate using the integer dtypes of the
primary implementation.
Returns
-------
ret
A strategy that draws integers.
"""
dtype = draw(
dtype_helpers.get_dtypes(
"integer", mixed_fn_compos=mixed_fn_compos, full=False, prune_function=False
)
)
if min_value is None and max_value is None:
safety_factor_scale = "linear"
if safety_factor_scale is not None:
min_value, max_value, _ = gh.apply_safety_factor(
dtype[0],
backend=test_globals.CURRENT_BACKEND,
min_value=min_value,
max_value=max_value,
large_abs_safety_factor=safety_factor,
safety_factor_scale=safety_factor_scale,
)
return draw(st.integers(min_value, max_value))
@st.composite
def number(
draw,
*,
min_value=None,
max_value=None,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
mixed_fn_compos=True,
):
"""Draws integers or floats with a safety factor applied to values.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
min_value
minimum value of integers generated.
max_value
maximum value of integers generated.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
        A safety factor of 1 means that all values are included without
        limitation; this parameter has no effect on integer data types.
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use for the safety factor scaling. Can be "linear" or "log".
Default value = "linear".
mixed_fn_compos
boolean if True, the function will generate using the numeric dtypes
of the compositional implementation for mixed partial functions and
if False, it will generate using the numeric dtypes of the
primary implementation.
Returns
-------
ret
A strategy that draws integers or floats.
"""
return draw(
ints(
min_value=min_value,
max_value=max_value,
safety_factor=large_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
mixed_fn_compos=mixed_fn_compos,
)
| floats(
min_value=min_value,
max_value=max_value,
small_abs_safety_factor=small_abs_safety_factor,
large_abs_safety_factor=large_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
mixed_fn_compos=mixed_fn_compos,
)
)
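# A minimal usage sketch for the strategies above (illustrative only; these
# helpers are normally consumed inside the test harness, which sets
# ``test_globals.CURRENT_BACKEND`` before any value is drawn):
#
#     from hypothesis import given
#
#     @given(x=number(min_value=-10, max_value=10))
#     def test_within_bounds(x):
#         assert -10 <= x <= 10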
| ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/number_helpers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/number_helpers.py",
"repo_id": "ivy",
"token_count": 4197
} | 46 |
from .base import FrontendConfigWithBackend
def get_config():
return TensorflowFrontendConfig()
class TensorflowFrontendConfig(FrontendConfigWithBackend):
backend_str = "tensorflow"
| ivy/ivy_tests/test_ivy/test_frontends/config/tensorflow.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/tensorflow.py",
"repo_id": "ivy",
"token_count": 60
} | 47 |
# global
import numpy as np
import ivy.functional.frontends.jax.lax as jlax
import ivy.functional.frontends.jax.numpy as jnp
from hypothesis import assume, strategies as st
import random
from jax.lax import ConvDimensionNumbers
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import (
_reduce_window_helper,
)
from ivy_tests.test_ivy.test_functional.test_nn.test_layers import (
_assume_tf_dilation_gt_1,
)
from ivy.functional.frontends.jax.numpy import can_cast
from ivy.functional.frontends.jax.lax.operators import (
_dimension_numbers,
_argsort_tuple,
)
# --- Helpers --- #
# --------------- #
# noinspection DuplicatedCode
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=4),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
xs = []
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=draw(helpers.get_dtypes("numeric")),
shared_dtype=True,
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes, unique_idx
@st.composite
def _div_dtypes_and_xs(draw):
dtype, dividend, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), ret_shape=True
)
)
divisor = draw(
helpers.array_values(dtype=dtype[0], min_value=-20, max_value=20, shape=shape)
)
return dtype, [dividend[0], divisor]
# select
@st.composite
def _dtype_pred_ontrue_on_false(draw):
shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1))
pred = draw(helpers.array_values(dtype="bool", shape=shape))
dtypes, on_true_on_false = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shape=shape,
large_abs_safety_factor=16,
small_abs_safety_factor=16,
safety_factor_scale="log",
shared_dtype=True,
)
)
return dtypes, pred, on_true_on_false
@st.composite
def _dtype_values_dims(draw):
dtype, values, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
ret_shape=True,
)
)
size = len(shape)
permutations = draw(
st.lists(
st.integers(min_value=0, max_value=len(shape) - 1),
min_size=size,
max_size=size,
unique=True,
)
)
return dtype, values, tuple(permutations)
@st.composite
def _fill_value(draw):
dtype = draw(helpers.get_dtypes("numeric", full=False, key="dtype"))[0]
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.is_uint_dtype(dtype):
return draw(helpers.ints(min_value=0, max_value=5))
elif ivy_backend.is_int_dtype(dtype):
return draw(helpers.ints(min_value=-5, max_value=5))
return draw(helpers.floats(min_value=-5, max_value=5))
@st.composite
def _general_dot_helper(draw):
input_dtype, lhs, lshape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-1e04,
max_value=1e04,
min_num_dims=2,
ret_shape=True,
)
)
ndims = len(lshape)
perm_id = random.sample(list(range(ndims)), ndims)
rshape = [lshape[i] for i in perm_id]
input_dtype, rhs = draw(
helpers.dtype_and_values(
dtype=input_dtype,
min_value=-1e04,
max_value=1e04,
shape=rshape,
)
)
ind_list = list(range(ndims))
batch_n = draw(st.integers(min_value=1, max_value=len(lshape) - 1))
lhs_batch = random.sample(ind_list, batch_n)
rhs_batch = [perm_id.index(i) for i in lhs_batch]
lhs_contracting = [i for i in ind_list if i not in lhs_batch]
rhs_contracting = [perm_id.index(i) for i in lhs_contracting]
is_pref = draw(st.booleans())
pref_dtype = None
if is_pref:
uint_cast_st = helpers.get_castable_dtype(
draw(helpers.get_dtypes("unsigned")),
input_dtype[0],
)
int_cast_st = helpers.get_castable_dtype(
draw(helpers.get_dtypes("signed_integer")),
input_dtype[0],
)
float_cast_st = helpers.get_castable_dtype(
draw(helpers.get_dtypes("float")),
input_dtype[0],
)
complex_cast_st = helpers.get_castable_dtype(
draw(helpers.get_dtypes("complex")),
input_dtype[0],
)
if "uint" in input_dtype[0]:
pref_dtype = draw(st.one_of(uint_cast_st, float_cast_st))[-1]
elif "int" in input_dtype[0]:
pref_dtype = draw(st.one_of(int_cast_st, float_cast_st))[-1]
elif "float" in input_dtype[0]:
pref_dtype = draw(float_cast_st)[-1]
elif "complex" in input_dtype[0]:
pref_dtype = draw(complex_cast_st)[-1]
else:
raise ivy.exceptions.IvyException("unsupported dtype")
return (
input_dtype * 2,
(lhs[0], rhs[0]),
((lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch)),
pref_dtype,
)
@st.composite
def _get_clamp_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=shape,
)
)
min = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=-25, max_value=0)
)
max = draw(
helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=1, max_value=25)
)
return x_dtype, x, min, max
@st.composite
def _get_dtype_inputs_for_batch_matmul(draw):
dtype, lhs = draw(
helpers.dtype_and_values(
min_num_dims=2,
max_num_dims=6,
min_value=2,
max_value=5,
)
)
lhs_shape = lhs[0].shape
rhs_shape = list(lhs_shape)
rhs_shape[-1], rhs_shape[-2] = rhs_shape[-2], rhs_shape[-1]
rhs_shape = tuple(rhs_shape)
rhs = draw(
helpers.array_values(
dtype=dtype[0],
shape=rhs_shape,
min_value=2,
max_value=5,
)
)
return dtype, lhs[0], rhs
@st.composite
def _get_dtype_inputs_for_dot(draw):
dim_size = draw(helpers.ints(min_value=1, max_value=5))
dtype = draw(helpers.get_dtypes("numeric", index=1, full=False))
if dim_size == 1:
lhs = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5
)
)
rhs = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5
)
)
else:
lhs = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5
)
)
rhs = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5
)
)
is_pref = draw(st.booleans())
if is_pref:
dtype, values, pref = draw(
helpers.get_castable_dtype(
draw(helpers.get_dtypes("numeric")), dtype[0], [lhs, rhs]
)
)
assume(can_cast(dtype, pref))
return [dtype], pref, values[0], values[1]
else:
return dtype, None, lhs, rhs
def _get_reduce_func(dtype):
if dtype[0] == "bool":
return st.sampled_from([jnp.logical_and, jnp.logical_or])
else:
return st.sampled_from([jlax.add, jlax.max, jlax.min, jlax.mul, jnp.multiply])
@st.composite
def _pad_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("bool"),
ret_shape=True,
min_num_dims=1,
min_dim_size=2,
min_value=-100,
max_value=100,
).filter(lambda _x: _x[0][0] not in ["float16", "bfloat16"])
)
ndim = len(shape)
min_dim = min(shape)
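    # each (low, high, interior) triple drawn below follows jax.lax.pad's
    # padding_config convention: low/high edge padding may be negative
    # (cropping), while interior padding inserts padding between elements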
padding_config = draw(
st.lists(
st.tuples(
st.integers(min_value=-(min_dim - 1), max_value=min_dim - 1),
st.integers(min_value=-(min_dim - 1), max_value=min_dim - 1),
st.integers(min_value=0, max_value=min_dim - 1),
),
min_size=ndim,
max_size=ndim,
)
)
padding_value = draw(st.booleans())
return dtype, x[0], padding_value, padding_config
@st.composite
def _reshape_helper(draw):
    # generate a shape such that len(shape) > 0
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
)
)
reshape_shape = draw(helpers.reshape_shapes(shape=shape))
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
is_dim = draw(st.booleans())
if is_dim:
        dims = list(range(len(shape)))
permut = draw(st.permutations(dims))
return x, dtypes, reshape_shape, permut
else:
return x, dtypes, reshape_shape, None
@st.composite
def _slice_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
ret_shape=True,
),
)
start_indices, limit_indices, strides = [], [], []
for i in shape:
start_indices += [draw(st.integers(min_value=0, max_value=i - 1))]
limit_indices += [
draw(
st.integers(min_value=0, max_value=i - 1).filter(
lambda _x: _x > start_indices[-1]
)
)
]
strides += [draw(st.integers(min_value=1, max_value=i))]
return dtype, x, start_indices, limit_indices, strides
@st.composite
def _slice_in_dim_helper(draw):
dtype, x, axis = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
force_int_axis=True,
valid_axis=True,
),
)
operand = x[0]
start_index = draw(
st.integers(min_value=-abs(operand.shape[axis]), max_value=operand.shape[axis])
)
if start_index < 0:
limit_index = draw(
st.integers(
min_value=start_index + operand.shape[axis],
max_value=operand.shape[axis],
)
)
else:
limit_index = draw(
st.integers(
min_value=-abs(operand.shape[axis]), max_value=operand.shape[axis]
).filter(lambda _x: _x >= start_index)
)
stride = draw(st.integers(min_value=1, max_value=abs(limit_index + 1)))
return dtype, x, start_index, limit_index, stride, axis
# squeeze
@st.composite
def _squeeze_helper(draw):
shape = draw(st.shared(helpers.get_shape(), key="value_shape"))
    valid_axes = [index for index, axis in enumerate(shape) if axis == 1]
    return valid_axes
@st.composite
def _x_and_filters(draw, dim=2, transpose=False, general=False):
if not isinstance(dim, int):
dim = draw(dim)
batch_size = draw(st.integers(1, 5))
filter_shape = draw(
helpers.get_shape(
min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5
)
)
dtype = draw(helpers.get_dtypes("float", full=False))
padding = draw(
st.one_of(
st.lists(
st.tuples(
st.integers(min_value=0, max_value=3),
st.integers(min_value=0, max_value=3),
),
min_size=dim,
max_size=dim,
),
st.sampled_from(["SAME", "VALID"]),
)
)
input_channels = draw(st.integers(1, 3))
output_channels = draw(st.integers(1, 3))
    group_list = list(range(1, 6))
if not transpose:
group_list = list(filter(lambda x: (input_channels % x == 0), group_list))
else:
group_list = list(filter(lambda x: (output_channels % x == 0), group_list))
fc = draw(st.sampled_from(group_list)) if general else 1
strides = draw(st.lists(st.integers(1, 3), min_size=dim, max_size=dim))
dilations = draw(st.lists(st.integers(1, 3), min_size=dim, max_size=dim))
if general:
if dim == 2:
dim_num_st1 = st.sampled_from(["NCHW", "NHWC"])
dim_num_st2 = st.sampled_from(["OIHW", "HWIO"])
elif dim == 1:
dim_num_st1 = st.sampled_from(["NWC", "NCW"])
dim_num_st2 = st.sampled_from(["OIW", "WIO"])
else:
dim_num_st1 = st.sampled_from(["NDHWC", "NCDHW"])
dim_num_st2 = st.sampled_from(["OIDHW", "DHWIO"])
dim_seq = [*range(0, dim + 2)]
dimension_numbers = draw(
st.sampled_from(
[
None,
(draw(dim_num_st1), draw(dim_num_st2), draw(dim_num_st1)),
ConvDimensionNumbers(
*map(
tuple,
draw(
st.lists(
st.permutations(dim_seq), min_size=3, max_size=3
)
),
)
),
]
)
)
else:
dimension_numbers = (
("NCH", "OIH", "NCH")
if dim == 1
else ("NCHW", "OIHW", "NCHW")
if dim == 2
else ("NCDHW", "OIDHW", "NCDHW")
)
dim_nums = _dimension_numbers(dimension_numbers, dim + 2, transp=transpose)
if not transpose:
output_channels = output_channels * fc
channel_shape = (output_channels, input_channels // fc)
else:
input_channels = input_channels * fc
channel_shape = (output_channels // fc, input_channels)
x_dim = []
for i in range(dim):
min_x = filter_shape[i] + (filter_shape[i] - 1) * (dilations[i] - 1)
x_dim.append(draw(st.integers(min_x, min_x + 1)))
x_shape = (batch_size, input_channels, *x_dim)
filter_shape = channel_shape + filter_shape
vals = draw(
helpers.array_values(
dtype=dtype[0],
shape=x_shape,
min_value=0.0,
max_value=1.0,
)
)
vals = ivy.permute_dims(vals, axes=_argsort_tuple(dim_nums[0]))
filters = draw(
helpers.array_values(
dtype=dtype[0],
shape=filter_shape,
min_value=0.0,
max_value=1.0,
)
)
filters = ivy.permute_dims(filters, axes=_argsort_tuple(dim_nums[1]))
if general and not transpose:
x_dilation = draw(st.lists(st.integers(1, 3), min_size=dim, max_size=dim))
dilations = (dilations, x_dilation)
if draw(st.booleans()):
p_dtype, pref = draw(
helpers.get_castable_dtype(draw(helpers.get_dtypes("float")), dtype[0])
)
assume(can_cast(p_dtype, pref))
else:
pref = None
return (
dtype,
vals,
filters,
dilations,
dimension_numbers,
strides,
padding,
fc,
pref,
)
# --- Main --- #
# ------------ #
# abs
@handle_frontend_test(
fn_tree="jax.lax.abs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
),
test_with_out=st.just(False),
)
def test_jax_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# acos
@handle_frontend_test(
fn_tree="jax.lax.acos",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_acos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# add
@handle_frontend_test(
fn_tree="jax.lax.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_add(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.argmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=False,
),
index_dtype=helpers.get_dtypes("integer", full=False),
test_with_out=st.just(False),
)
def test_jax_argmax(
*,
dtype_x_axis,
index_dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
operand=x[0],
axis=axis,
index_dtype=index_dtype[0],
)
@handle_frontend_test(
fn_tree="jax.lax.argmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=False,
),
index_dtype=helpers.get_dtypes("integer", full=False),
test_with_out=st.just(False),
)
def test_jax_argmin(
*,
dtype_x_axis,
index_dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
axis=axis,
index_dtype=index_dtype[0],
)
# asin
@handle_frontend_test(
fn_tree="jax.lax.asin",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_asin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# asinh
@handle_frontend_test(
fn_tree="jax.lax.asinh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_asinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# atan
@handle_frontend_test(
fn_tree="jax.lax.atan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_atan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# atan2
@handle_frontend_test(
fn_tree="jax.lax.atan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_jax_atan2(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# atanh
@handle_frontend_test(
fn_tree="jax.lax.atanh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_atanh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.batch_matmul",
dtypes_and_xs=_get_dtype_inputs_for_batch_matmul(),
test_with_out=st.just(False),
)
def test_jax_batch_matmul(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, lhs, rhs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
lhs=lhs,
rhs=rhs,
precision=None,
)
# bitwise_and
@handle_frontend_test(
fn_tree="jax.lax.bitwise_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_bitwise_and(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# bitwise_not
@handle_frontend_test(
fn_tree="jax.lax.bitwise_not",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=1,
),
test_with_out=st.just(False),
)
def test_jax_bitwise_not(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# bitwise_or
@handle_frontend_test(
fn_tree="jax.lax.bitwise_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_bitwise_or(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# bitwise_xor
@handle_frontend_test(
fn_tree="jax.lax.bitwise_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_bitwise_xor(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.broadcast",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
sizes=helpers.get_shape(min_num_dims=1),
test_with_out=st.just(False),
)
def test_jax_broadcast(
*,
dtype_and_x,
sizes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
sizes=sizes,
)
# cbrt
@handle_frontend_test(
fn_tree="jax.lax.cbrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), min_value=0.0
),
test_with_out=st.just(False),
)
def test_jax_cbrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_ceil(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.clamp",
dtype_x_min_max=_get_clamp_inputs(),
test_with_out=st.just(False),
)
def test_jax_clamp(
*,
dtype_x_min_max,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, x, min_vals, max_vals = dtype_x_min_max
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
min=min_vals,
x=x[0],
max=max_vals,
)
# complex
@handle_frontend_test(
fn_tree="jax.lax.complex",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_complex(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# concat
@handle_frontend_test(
fn_tree="jax.lax.concatenate",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
test_with_out=st.just(False),
)
def test_jax_concat(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operands=xs,
dimension=unique_idx,
)
# conj
@handle_frontend_test(
fn_tree="jax.lax.conj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["complex64"],
),
)
def test_jax_conj(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.conv",
x_f_d_other=_x_and_filters(),
test_with_out=st.just(False),
)
def test_jax_conv(
*,
x_f_d_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, filters, dilation, dim_num, stride, pad, fc, pref = x_f_d_other
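    # skip draws the TensorFlow backend can't execute (dilation > 1 limitation)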
_assume_tf_dilation_gt_1(backend_fw, on_device, dilation)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
lhs=x,
rhs=filters,
window_strides=stride,
padding=pad,
precision=None,
preferred_element_type=pref,
)
@handle_frontend_test(
fn_tree="jax.lax.conv_general_dilated",
x_f_d_other=_x_and_filters(general=True),
test_with_out=st.just(False),
)
def test_jax_conv_general_dilated(
*,
x_f_d_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, filters, dilations, dim_num, stride, pad, fc, pref = x_f_d_other
    _assume_tf_dilation_gt_1(backend_fw, on_device, dilations[0])
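    # string padding modes are only exercised when every lhs dilation equals 1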
assume(not isinstance(pad, str) or len(dilations[1]) == dilations[1].count(1))
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
lhs=x,
rhs=filters,
window_strides=stride,
padding=pad,
lhs_dilation=dilations[1],
rhs_dilation=dilations[0],
dimension_numbers=dim_num,
feature_group_count=fc,
batch_group_count=1,
precision=None,
preferred_element_type=pref,
)
@handle_frontend_test(
fn_tree="jax.lax.conv_transpose",
x_f_d_other=_x_and_filters(general=True, transpose=True),
test_with_out=st.just(False),
)
def test_jax_conv_transpose(
*,
x_f_d_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, filters, dilation, dim_num, stride, pad, fc, pref = x_f_d_other
    _assume_tf_dilation_gt_1(backend_fw, on_device, dilation)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
lhs=x,
rhs=filters,
strides=stride,
padding=pad,
rhs_dilation=dilation,
dimension_numbers=dim_num,
transpose_kernel=False,
precision=None,
preferred_element_type=pref,
)
@handle_frontend_test(
fn_tree="jax.lax.convert_element_type",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
new_dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_jax_convert_element_type(
*,
dtype_and_x,
new_dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
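    # restrict to dtype pairs that can_cast reports as valid conversions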
assume(can_cast(input_dtype[0], new_dtype[0]))
helpers.test_frontend_function(
input_dtypes=input_dtype + new_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
new_dtype=new_dtype[0],
)
# cos
@handle_frontend_test(
fn_tree="jax.lax.cos",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_cos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cosh
@handle_frontend_test(
fn_tree="jax.lax.cosh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_cosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cummin
@handle_frontend_test(
fn_tree="jax.lax.cummin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_cummin(
*,
dtype_x_axis,
reverse,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
operand=x[0],
axis=axis,
reverse=reverse,
)
@handle_frontend_test(
fn_tree="jax.lax.cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_value=-5,
max_value=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_cumprod(
*,
dtype_x_axis,
reverse,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
operand=x[0],
axis=axis,
reverse=reverse,
)
@handle_frontend_test(
fn_tree="jax.lax.cumsum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_cumsum(
*,
dtype_x_axis,
reverse,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
axis=axis,
reverse=reverse,
)
@handle_frontend_test(
fn_tree="jax.lax.div",
dtypes_and_xs=_div_dtypes_and_xs(),
test_with_out=st.just(False),
)
def test_jax_div(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
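    # avoid dividing by values at or near zero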
assume(not np.any(np.isclose(xs[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.dot",
dtypes_and_xs=_get_dtype_inputs_for_dot(),
test_with_out=st.just(False),
)
def test_jax_dot(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, dtype, lhs, rhs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
lhs=lhs,
rhs=rhs,
precision=None,
preferred_element_type=dtype,
)
@handle_frontend_test(
fn_tree="jax.lax.dot_general",
dtypes_lr_dims=_general_dot_helper(),
test_with_out=st.just(False),
)
def test_jax_dot_general(
*,
dtypes_lr_dims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, lr, dims, dtype = dtypes_lr_dims
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
lhs=lr[0],
rhs=lr[1],
dimension_numbers=dims,
precision=None,
preferred_element_type=dtype,
)
@handle_frontend_test(
fn_tree="jax.lax.eq",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_eq(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.erf",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_erf(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
)
# erfc
@handle_frontend_test(
fn_tree="jax.lax.erfc",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
test_with_out=st.just(False),
)
def test_jax_erfc(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_exp(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# expand_dims
@handle_frontend_test(
fn_tree="jax.lax.expand_dims",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
force_int_axis=True,
valid_axis=True,
),
test_with_out=st.just(False),
)
def test_jax_expand_dims(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=x[0],
dimensions=(axis,),
)
@handle_frontend_test(
fn_tree="jax.lax.expm1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_expm1(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.full",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
fill_value=_fill_value(),
dtypes=helpers.get_dtypes("numeric", full=False, key="dtype"),
)
def test_jax_full(
*,
shape,
fill_value,
dtypes,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
shape=shape,
fill_value=fill_value,
dtype=dtypes[0],
)
@handle_frontend_test(
fn_tree="jax.lax.full_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", full=False, key="dtype")
),
fill_val=_fill_value(),
    shape=st.one_of(helpers.get_shape(), st.none()),
dtype=st.shared(helpers.get_dtypes("numeric", full=False), key="dtype"),
test_with_out=st.just(False),
)
def test_jax_full_like(
*,
dtype_and_x,
fill_val,
shape,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
fill_value=fill_val,
dtype=dtype,
shape=shape,
)
@handle_frontend_test(
fn_tree="jax.lax.ge",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_ge(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.gt",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_gt(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# igamma
@handle_frontend_test(
fn_tree="jax.lax.igamma",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_igamma(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, (x, y) = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
x=x,
y=y,
)
# imag
@handle_frontend_test(
fn_tree="jax.lax.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex")
),
)
def test_jax_imag(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
x=x[0],
)
# iota
@handle_frontend_test(
fn_tree="jax.lax.iota",
dtypes=helpers.get_dtypes("valid", full=False),
size=helpers.ints(min_value=0, max_value=10),
test_with_out=st.just(False),
)
def test_jax_iota(
*,
dtypes,
size,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
dtype=dtypes[0],
size=size,
)
# is_finite
@handle_frontend_test(
fn_tree="jax.lax.is_finite",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_is_finite(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.le",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_le(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
),
test_with_out=st.just(False),
)
def test_jax_log(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log1p
@handle_frontend_test(
fn_tree="jax.lax.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_log1p(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.lt",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_lt(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# max
@handle_frontend_test(
fn_tree="jax.lax.max",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_max(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.min",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_min(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.mul",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
small_abs_safety_factor=2,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_mul(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.ne",
dtypes_and_xs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_ne(
*,
dtypes_and_xs,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
@handle_frontend_test(
fn_tree="jax.lax.neg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
),
test_with_out=st.just(False),
)
def test_jax_neg(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# nextafter
@handle_frontend_test(
fn_tree="jax.lax.nextafter",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
min_value=-100,
max_value=100,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_nextafter(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
        x2=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.pad",
dtype_x_params=_pad_helper(),
test_with_out=st.just(False),
)
def test_jax_pad(
*,
dtype_x_params,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, operand, padding_value, padding_config = dtype_x_params
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=operand,
padding_value=padding_value,
padding_config=padding_config,
)
@handle_frontend_test(
fn_tree="jax.lax.pow",
dtypes_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_pow(
*,
dtypes_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, xs = dtypes_and_values
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# real
@handle_frontend_test(
fn_tree="jax.lax.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("complex")
),
)
def test_jax_real(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
test_values=True,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_reciprocal(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.reduce_window",
all_args=_reduce_window_helper(_get_reduce_func),
test_with_out=st.just(False),
)
def test_jax_reduce_window(
*,
all_args,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, operand, init_value, computation, others, padding = all_args
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=operand[0],
init_value=init_value[0],
computation=computation,
window_dimensions=others[0],
window_strides=others[1],
padding=padding,
base_dilation=others[2],
window_dilation=None,
)
@handle_frontend_test(
fn_tree="jax.lax.rem",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
allow_inf=False,
),
test_with_out=st.just(False),
)
def test_jax_rem(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
    assume(not np.any(np.isclose(x[0], 0)))  # TODO: should a safety factor be used here?
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.reshape",
x_reshape_permut=_reshape_helper(),
test_with_out=st.just(False),
)
def test_jax_reshape(
*,
x_reshape_permut,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x, dtype, shape, dimensions = x_reshape_permut
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
new_sizes=shape,
dimensions=dimensions,
)
@handle_frontend_test(
fn_tree="jax.lax.rev",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=2,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=False,
),
test_with_out=st.just(False),
)
def test_jax_rev(
*,
dtype_x_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
dimensions=(axis,),
)
# round
@handle_frontend_test(
fn_tree="jax.lax.round",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
rounding_method=st.sampled_from([0, 1]),
test_with_out=st.just(False),
)
def test_jax_round(
*,
dtype_and_x,
rounding_method,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
rounding_method=rounding_method,
)
@handle_frontend_test(
fn_tree="jax.lax.rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_jax_rsqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.select",
dtype_pred_ontrue_on_false=_dtype_pred_ontrue_on_false(),
test_with_out=st.just(False),
)
def test_jax_select(
*,
dtype_pred_ontrue_on_false,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, pred, on_true_on_false = dtype_pred_ontrue_on_false
helpers.test_frontend_function(
input_dtypes=["bool"] + input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
pred=pred,
on_true=on_true_on_false[0],
        on_false=on_true_on_false[1],
)
@handle_frontend_test(
fn_tree="jax.lax.shift_left",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_shift_left(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
x[1] = np.asarray(
np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1]
)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
@handle_frontend_test(
fn_tree="jax.lax.shift_right_logical",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
min_value=0,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_shift_right_logical(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
# negative shifts will throw an exception
# shifts >= dtype width produce backend-defined behavior
x[1] = np.asarray(
np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1]
)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# sign
@handle_frontend_test(
fn_tree="jax.lax.sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
test_with_out=st.just(False),
)
def test_jax_sign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sin
@handle_frontend_test(
fn_tree="jax.lax.sin",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_sin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sinh
@handle_frontend_test(
fn_tree="jax.lax.sinh",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_sinh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.slice",
dtype_x_params=_slice_helper(),
test_with_out=st.just(False),
)
def test_jax_slice(
*,
dtype_x_params,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, start_indices, limit_indices, strides = dtype_x_params
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
start_indices=start_indices,
limit_indices=limit_indices,
strides=strides,
)
@handle_frontend_test(
fn_tree="jax.lax.slice_in_dim",
dtype_x_params=_slice_in_dim_helper(),
test_with_out=st.just(False),
)
def test_jax_slice_in_dim(
*,
dtype_x_params,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, start_index, limit_index, stride, axis = dtype_x_params
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
start_index=start_index,
limit_index=limit_index,
stride=stride,
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.lax.sort",
dtype_x_bounded_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
is_stable=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_sort(
*,
dtype_x_bounded_axis,
is_stable,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_x_bounded_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
dimension=axis,
is_stable=is_stable,
)
# sqrt
@handle_frontend_test(
fn_tree="jax.lax.sqrt",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_sqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
small_abs_safety_factor=2,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_square(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.squeeze",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(
helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=5,
),
key="value_shape",
),
),
dim=_squeeze_helper(),
)
def test_jax_squeeze(
*,
dtype_and_values,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
array=value[0],
dimensions=dim,
)
@handle_frontend_test(
fn_tree="jax.lax.sub",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_sub(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# tan
@handle_frontend_test(
fn_tree="jax.lax.tan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_jax_tan(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tie_in
@handle_frontend_test(
fn_tree="jax.lax.tie_in",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_tie_in(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# top_k
@handle_frontend_test(
fn_tree="jax.lax.top_k",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
min_dim_size=4,
max_dim_size=10,
),
k=helpers.ints(min_value=1, max_value=4),
test_with_out=st.just(False),
)
def test_jax_top_k(
*,
dtype_and_x,
k,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
k=k,
# test_values=False,
)
@handle_frontend_test(
fn_tree="jax.lax.transpose",
dtype_x_dims=_dtype_values_dims(),
test_with_out=st.just(False),
)
def test_jax_transpose(
*,
dtype_x_dims,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, dims = dtype_x_dims
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
operand=x[0],
permutation=dims,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_operators.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_operators.py",
"repo_id": "ivy",
"token_count": 39585
} | 48 |
from ivy_tests.test_ivy.test_frontends import NativeClass
mindspore_classes_to_ivy_classes = {}
def convmindspore(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for jax."""
if isinstance(argument, NativeClass):
return mindspore_classes_to_ivy_classes.get(argument._native_class)
return argument
| ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_mindspore/__init__.py",
"repo_id": "ivy",
"token_count": 116
} | 49 |
# global
import numpy as np
from hypothesis import assume, strategies as st, given
# local
import ivy
from ivy.functional.frontends.numpy import ndarray
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import (
handle_frontend_method,
assert_all_close,
BackendHandler,
)
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_first_matrix_and_dtype,
_get_second_matrix_and_dtype,
)
from ivy_tests.test_ivy.test_frontends.test_numpy.test_mathematical_functions.test_miscellaneous import ( # noqa
_get_clip_inputs,
)
from ivy_tests.test_ivy.test_frontends.test_numpy.test_mathematical_functions.test_sums_products_differences import ( # noqa
_get_castable_dtypes_values,
)
from ivy_tests.test_ivy.test_frontends.test_numpy.test_manipulation_routines.test_changing_number_of_dimensions import ( # noqa
_squeeze_helper,
)
CLASS_TREE = "ivy.functional.frontends.numpy.ndarray"
# --- Helpers --- #
# --------------- #
# item
@st.composite
def _item_helper(draw):
dtype = draw(
helpers.array_dtypes(
num_arrays=1,
available_dtypes=helpers.get_dtypes("numeric"),
)
)
shape = draw(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=10,
)
)
array = draw(
helpers.array_values(
dtype=dtype[0],
shape=shape,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
)
)
index = ()
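    # draw one in-bounds (possibly negative) index per axis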
for s in shape:
index += (draw(st.integers(min_value=-s + 1, max_value=s - 1)),)
index_samples = [index, draw(helpers.ints(min_value=0, max_value=array.size - 1))]
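    # size-1 arrays may also call item() with no index at all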
if array.size == 1:
index_samples.append(None)
sampled_index = draw(st.sampled_from(index_samples))
if sampled_index is None:
method_all_as_kwargs_np = {}
num_positional_args = 0
else:
method_all_as_kwargs_np = {"args": sampled_index}
num_positional_args = 1
return dtype, array, method_all_as_kwargs_np, num_positional_args
# swapaxes
@st.composite
def dtype_values_and_axes(draw):
dtype, x, x_shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
ret_shape=True,
)
)
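    # draw two distinct axes of x to swap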
axis1, axis2 = draw(
helpers.get_axis(
shape=x_shape,
sort_values=False,
unique=True,
min_size=2,
max_size=2,
force_tuple=True,
)
)
return dtype, x, axis1, axis2
@st.composite
def dtypes_x_reshape(draw):
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
)
)
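    # draw a valid reshape target for x (same total number of elements)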
shape = draw(helpers.reshape_shapes(shape=np.array(x).shape))
return dtypes, x, shape
# --- Main --- #
# ------------ #
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_T(
dtype_x,
backend_fw,
frontend,
):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ret = helpers.flatten_and_to_np(ret=x.T.ivy_array, backend=backend_fw)
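        # ground truth: .T is equivalent to reversing the axis order via permute_dims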
ret_gt = helpers.flatten_and_to_np(
ret=ivy_backend.permute_dims(
ivy_backend.native_array(data[0]), list(range(len(shape)))[::-1]
),
backend=backend_fw,
)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__abs__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
allow_inf=False,
large_abs_safety_factor=4,
safety_factor_scale="linear",
),
)
def test_numpy___abs__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__add__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___add__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__and__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___and__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __array__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__array__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy___array__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"dtype": np.dtype(input_dtypes[0]),
},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
# __array_wrap__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__array_wrap__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
),
)
def test_numpy___array_wrap__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"array": x[1],
"context": None,
},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__bool__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_dim_size=1,
),
)
def test_numpy___bool__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__complex__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_dim_size=1,
max_dim_size=1,
),
)
def test_numpy___complex__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__contains__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy___contains__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
key = np.asarray(xs[0].reshape(-1)[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"key": key,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__copy__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_numpy___copy__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__deepcopy__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_numpy___deepcopy__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"memo": {},
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__eq__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___eq__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__float__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
),
)
def test_numpy___float__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
# Numpy doesn't support complex to float conversion
assume(not np.issubdtype(input_dtypes[0], np.complexfloating))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __floordiv__ test
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__floordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=4,
safety_factor_scale="linear",
shared_dtype=True,
),
)
def test_numpy___floordiv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
assume(not np.any(np.isclose(xs[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
init_flags=init_flags,
method_flags=method_flags,
frontend_method_data=frontend_method_data,
frontend=frontend,
atol_=1,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ge__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___ge__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__gt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___gt__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__iadd__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___iadd__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__iand__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___iand__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ifloordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=4,
safety_factor_scale="linear",
shared_dtype=True,
),
)
def test_numpy___ifloordiv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
assume(not np.any(np.isclose(xs[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
init_flags=init_flags,
method_flags=method_flags,
frontend_method_data=frontend_method_data,
frontend=frontend,
atol_=1,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__imod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=0,
exclude_min=True,
),
)
def test_numpy___imod__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__imul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___imul__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__int__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
min_value=-1e15,
max_value=1e15,
),
)
def test_numpy___int__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
# Numpy doesn't support complex to int conversion
assume(not np.issubdtype(input_dtypes[0], np.complexfloating))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__invert__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="integer"),
num_arrays=1,
),
)
def test_numpy___invert__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ior__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___ior__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ipow__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
power=helpers.ints(min_value=1, max_value=3),
)
def test_numpy___ipow__(
dtype_and_x,
power,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": power,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __irshift__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__irshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
),
)
def test_numpy___irshift__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
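    # Keep the shift amount within [0, bits - 1], cap the shifted operand
    # below the dtype's overflow threshold, and drop the sign of negative
    # values so the in-place right shift stays well-defined on every backend.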
max_bits = np.iinfo(input_dtypes[0]).bits
max_shift = max_bits - 1
x[1] = np.asarray(np.clip(x[1], 0, max_shift), dtype=input_dtypes[1])
max_value_before_shift = 2 ** (max_bits - x[1]) - 1
overflow_threshold = 2 ** (max_bits - 1)
x[0] = np.asarray(
np.clip(x[0], None, max_value_before_shift), dtype=input_dtypes[0]
)
if np.any(x[0] > overflow_threshold):
x[0] = np.clip(x[0], None, overflow_threshold)
if np.any(x[0] < 0):
x[0] = np.abs(x[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"value": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__isub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___isub__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __itruediv__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__itruediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_numpy___itruediv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ixor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___ixor__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__le__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___le__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __len__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__len__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
),
)
def test_numpy___len__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__lt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___lt__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__matmul__",
x=_get_first_matrix_and_dtype(),
y=_get_second_matrix_and_dtype(),
)
def test_numpy___matmul__(
x,
y,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
dtype1, x1 = x
dtype2, x2 = y
input_dtypes = dtype1 + dtype2
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x1,
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": x2,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# mod
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__mod__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
min_value=0,
exclude_min=True,
),
)
def test_numpy___mod__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
rtol_=1e-5,
atol_=1e-5,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__mul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___mul__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ne__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___ne__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__neg__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_numpy___neg__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__or__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___or__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__pos__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_numpy___pos__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__pow__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
power=helpers.ints(min_value=1, max_value=3),
)
def test_numpy___pow__(
dtype_and_x,
power,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": power,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__radd__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___radd__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
method_flags=method_flags,
init_flags=init_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__rmul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___rmul__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__rshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
),
)
def test_numpy___rshift__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
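    # Same guards as __irshift__ above: bound the shift amount and operand
    # and keep values non-negative before exercising __rshift__.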
max_bits = np.iinfo(input_dtypes[0]).bits
max_shift = max_bits - 1
x[1] = np.asarray(np.clip(x[1], 0, max_shift), dtype=input_dtypes[1])
max_value_before_shift = 2 ** (max_bits - x[1]) - 1
overflow_threshold = 2 ** (max_bits - 1)
x[0] = np.asarray(
np.clip(x[0], None, max_value_before_shift), dtype=input_dtypes[0]
)
if np.any(x[0] > overflow_threshold):
x[0] = np.clip(x[0], None, overflow_threshold)
if np.any(x[0] < 0):
x[0] = np.abs(x[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"value": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__rtruediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___rtruediv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
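    # __rtruediv__ computes value / self, so self (xs[0]) is the divisor
    # and must be kept away from zero.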
assume(not np.any(np.isclose(xs[0], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__sub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2
),
)
def test_numpy___sub__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
backend_to_test=backend_fw,
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__truediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
),
)
def test_numpy___truediv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
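    # xs[1] is the divisor in xs[0] / xs[1]; keep it away from zero.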
    assume(not np.any(np.isclose(xs[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
init_flags=init_flags,
method_flags=method_flags,
frontend_method_data=frontend_method_data,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__xor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=("bool",),
num_arrays=2,
),
)
def test_numpy___xor__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"value": xs[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="all",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid", full=True),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=True,
),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_all(
dtype_x_axis,
keepdims,
where,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
init_input_dtypes, x, axis = dtype_x_axis
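    # Fold the optional boolean `where` mask into the method dtypes and
    # flags; a drawn list is reduced to its single boolean entry.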
(
where,
method_input_dtypes,
method_flags,
) = np_frontend_helpers.handle_where_and_array_bools(
where=[where[0][0]] if isinstance(where, list) else where,
input_dtype=init_input_dtypes,
test_flags=method_flags,
)
helpers.test_frontend_method(
init_input_dtypes=init_input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=method_input_dtypes[1:],
method_all_as_kwargs_np={
"axis": axis,
"dtype": bool,
"out": None,
"keepdims": keepdims,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# any
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="any",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
valid_axis=True,
force_int_axis=True,
allow_neg_axes=True,
),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_any(
dtype_x_axis,
keepdims,
where,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
init_input_dtypes, x, axis = dtype_x_axis
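    # `where` handling mirrors test_numpy_all above.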
(
where,
method_input_dtypes,
method_flags,
) = np_frontend_helpers.handle_where_and_array_bools(
where=[where[0][0]] if isinstance(where, list) else where,
input_dtype=init_input_dtypes,
test_flags=method_flags,
)
helpers.test_frontend_method(
init_input_dtypes=init_input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=method_input_dtypes[1:],
method_all_as_kwargs_np={
"axis": axis,
"dtype": bool,
"out": None,
"keepdims": keepdims,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="argmax",
dtype_x_axis=helpers.dtype_values_axis(
        available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
)
def test_numpy_argmax(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdims": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="argmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
keepdims=st.booleans(),
)
def test_numpy_argmin(
dtype_x_axis,
keepdims,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdims": keepdims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="argsort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
)
def test_numpy_argsort(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
method_all_as_kwargs_np={
"axis": axis,
"kind": None,
"order": None,
},
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="astype",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
)
],
),
order=st.sampled_from(["C", "F", "A", "K"]),
copy=st.booleans(),
)
def test_numpy_astype(
dtypes_values_casting,
order,
copy,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, casting, dtype = dtypes_values_casting
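    # When no casting target was drawn, fall back to the input dtype below.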
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"dtype": dtype if dtype else input_dtypes[0],
"order": order,
"casting": casting,
"copy": copy,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="clip",
input_and_ranges=_get_clip_inputs(),
)
def test_numpy_clip(
input_and_ranges,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, min, max = input_and_ranges
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_all_as_kwargs_np={
"min": min,
"max": max,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="compress",
dtype_arr_ax=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=10,
max_dim_size=100,
valid_axis=True,
force_int_axis=True,
),
condition=helpers.array_values(
dtype=helpers.get_dtypes("bool"),
shape=helpers.get_shape(
min_num_dims=1, max_num_dims=1, min_dim_size=1, max_dim_size=5
),
),
)
def test_numpy_compress(
dtype_arr_ax,
condition,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, arr, ax = dtype_arr_ax
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": arr[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"condition": condition,
"axis": ax,
"out": None,
},
frontend=frontend,
backend_to_test=backend_fw,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="conjugate",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("real_and_complex"),
),
)
def test_numpy_conjugate(
dtype_and_x,
on_device,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="copy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
),
)
def test_numpy_copy(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
dtype=helpers.get_dtypes("float", full=False, none=True),
)
def test_numpy_cumprod(
dtype_x_axis,
dtype,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype[0],
"out": None,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="cumsum",
dtype_x_axis_dtype=_get_castable_dtypes_values(),
)
def test_numpy_cumsum(
dtype_x_axis_dtype,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis, dtype = dtype_x_axis_dtype
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype,
"out": None,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="diagonal",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_axes_size=2,
max_axes_size=2,
valid_axis=True,
),
offset=st.integers(min_value=-2, max_value=2),
)
def test_numpy_diagonal(
dtype_x_axis,
offset,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis1": axis[0],
"axis2": axis[1],
"offset": offset,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="dot",
dtype_and_x=np_frontend_helpers._get_dtype_input_and_vectors(),
)
def test_numpy_dot(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x, other = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"b": other,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_dtype(dtype_x, backend_fw, frontend):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ivy_backend.utils.assertions.check_equal(
x.dtype, ivy.Dtype(dtype[0]), as_array=False
)
# fill
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="fill",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
num=st.integers(min_value=1, max_value=10) | st.floats(min_value=1, max_value=10),
)
def test_numpy_fill(
dtype_and_x,
num,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=[],
method_all_as_kwargs_np={
"num": num,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric", prune_function=False),
num_arrays=1,
ret_shape=True,
)
)
def test_numpy_flat(dtype_x, backend_fw):
dtype, data, shape = dtype_x
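    # Build a frontend ndarray around the drawn data and compare its flat
    # view against numpy's flatten of the same values.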
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
flat_ivy = x.flat
flat_ivy = flat_ivy.ivy_array.to_numpy()
flat_generated = ivy_backend.to_numpy(data[0]).flatten()
ivy_backend.utils.assertions.check_equal(
flat_ivy, flat_generated, as_array=True
)
# __getitem__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__getitem__",
dtype_x_index=helpers.dtype_array_query(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_getitem(
dtype_x_index,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x, index = dtype_x_index
helpers.test_frontend_method(
init_input_dtypes=[input_dtype[0]],
init_all_as_kwargs_np={"object": x},
method_input_dtypes=[*input_dtype[1:]],
method_all_as_kwargs_np={"key": index},
backend_to_test=backend_fw,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __ilshift__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__ilshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
max_dim_size=1,
max_value=2**31 - 1,
),
)
def test_numpy___ilshift__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
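    # Left shifts overflow quickly, so clip the operand below
    # 2 ** (bits - shift) - 1 and keep the shift and operand non-negative.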
max_bits = np.iinfo(input_dtypes[0]).bits
max_shift = max_bits - 1
x[1] = np.asarray(np.clip(x[1], 0, max_shift), dtype=input_dtypes[1])
max_value_before_shift = 2 ** (max_bits - x[1]) - 1
overflow_threshold = 2 ** (max_bits - 1)
x[0] = np.asarray(
np.clip(x[0], None, max_value_before_shift), dtype=input_dtypes[0]
)
if np.any(x[0] > overflow_threshold):
x[0] = np.clip(x[0], None, overflow_threshold)
if np.any(x[0] < 0):
x[0] = np.abs(x[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"value": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__lshift__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
max_dim_size=1,
max_value=2**31 - 1,
),
)
def test_numpy___lshift__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtypes, x = dtype_and_x
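    # Apply the same overflow guards as __ilshift__ before testing the
    # out-of-place left shift.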
max_bits = np.iinfo(input_dtypes[0]).bits
max_shift = max_bits - 1
x[1] = np.asarray(np.clip(x[1], 0, max_shift), dtype=input_dtypes[1])
max_value_before_shift = 2 ** (max_bits - x[1]) - 1
overflow_threshold = 2 ** (max_bits - 1)
x[0] = np.asarray(
np.clip(x[0], None, max_value_before_shift), dtype=input_dtypes[0]
)
if np.any(x[0] > overflow_threshold):
x[0] = np.clip(x[0], None, overflow_threshold)
if np.any(x[0] < 0):
x[0] = np.abs(x[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"value": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="item",
args_kwargs=_item_helper(),
)
def test_numpy_item(
args_kwargs,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, method_all_as_kwargs_np, num_positional_args = args_kwargs
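    # The item helper draws its own positional/keyword split, so override
    # the count drawn by the test decorator.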
method_flags.num_positional_args = num_positional_args
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"object": x},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np=method_all_as_kwargs_np,
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_ivy_array(
dtype_x,
frontend,
backend_fw,
):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ret = helpers.flatten_and_to_np(ret=x.ivy_array.data, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(ret=data[0], backend=frontend)
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="max",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keepdims=st.booleans(),
)
def test_numpy_max(
dtype_x_axis,
keepdims,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdims": keepdims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="mean",
dtype_and_x=_statistical_dtype_values(function="mean"),
dtype=helpers.get_dtypes("float", full=False, none=True),
where=np_frontend_helpers.where(),
keep_dims=st.booleans(),
)
def test_numpy_mean(
dtype_and_x,
dtype,
where,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_and_x
    (
        where,
        input_dtypes,
        method_flags,
    ) = np_frontend_helpers.handle_where_and_array_bools(
        where=where,
        input_dtype=input_dtypes,
        test_flags=method_flags,
    )
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes[1:],
init_all_as_kwargs_np={
"object": x[0],
},
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype[0],
"out": None,
"keepdims": keep_dims,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="min",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keepdims=st.booleans(),
)
def test_numpy_min(
dtype_x_axis,
keepdims,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdims": keepdims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="nonzero",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_nonzero(
dtype_and_a,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, a = dtype_and_a
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": a[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# prod
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="prod",
dtype_x_axis_dtype=_get_castable_dtypes_values(use_where=True),
keep_dims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_prod(
dtype_x_axis_dtype,
keep_dims,
initial,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis, dtype, where = dtype_x_axis_dtype
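    # Variable-mode draws are skipped on the torch backend, where this
    # path does not support them.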
if ivy.current_backend_str() == "torch":
assume(not method_flags.as_variable[0])
(
where,
input_dtypes,
method_flags,
) = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=method_flags,
)
where = ivy.array(where, dtype="bool")
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype,
"keepdims": keep_dims,
"initial": initial,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_property_ndim(dtype_x, backend_fw):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ivy_backend.utils.assertions.check_equal(x.ndim, data[0].ndim, as_array=False)
# ptp
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="ptp",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
),
)
def test_numpy_ptp(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="ravel",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_ravel(
dtype_and_a,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, a = dtype_and_a
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": a[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="repeat",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
min_dim_size=2,
),
repeats=helpers.ints(min_value=2, max_value=5),
axis=helpers.ints(min_value=-1, max_value=1),
)
def test_numpy_repeat(
dtype_and_x,
repeats,
axis,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"repeats": repeats,
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="reshape",
dtypes_x_shape=dtypes_x_reshape(),
order=st.sampled_from(["C", "F", "A"]),
)
def test_numpy_reshape(
dtypes_x_shape,
order,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, shape = dtypes_x_shape
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=[],
method_all_as_kwargs_np={
"newshape": shape,
"order": order,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# round
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="round",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
num_arrays=1,
max_value=50,
min_value=-50,
),
decimals=st.integers(min_value=0, max_value=3),
)
def test_numpy_round(
dtype_and_x,
decimals,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
init_all_as_kwargs_np={
"object": x,
},
method_all_as_kwargs_np={
"decimals": decimals,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="searchsorted",
dtype_x_v=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("signed_integer"),
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
),
side=st.sampled_from(["left", "right"]),
)
def test_numpy_searchsorted(
dtype_x_v,
side,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_x_v
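    # The drawn array is unsorted, so its argsort is passed as `sorter`
    # to give searchsorted a validly ordered view.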
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"v": xs[1],
"side": side,
"sorter": np.argsort(xs[0]),
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __setitem__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="__setitem__",
dtypes_x_index_val=helpers.dtype_array_query_val(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_setitem(
dtypes_x_index_val,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x, index, val = dtypes_x_index_val
helpers.test_frontend_method(
init_input_dtypes=[input_dtype[0]],
init_all_as_kwargs_np={"object": x},
method_input_dtypes=[*input_dtype[1:]],
method_all_as_kwargs_np={"key": index, "value": val},
backend_to_test=backend_fw,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_shape(
dtype_x,
backend_fw,
):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ivy_backend.utils.assertions.check_equal(
x.shape, ivy.Shape(shape), as_array=False
)
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
)
def test_numpy_size(
    dtype_x,
    backend_fw,
):
    dtype, data, shape = dtype_x
    with BackendHandler.update_backend(backend_fw) as ivy_backend:
        x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
        x.ivy_array = data[0]
        ivy_backend.utils.assertions.check_equal(
            x.size, data[0].size, as_array=False
        )
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="sort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
)
def test_numpy_sort(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
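    # Value comparison is done manually against np.sort below, so the
    # helper's automatic check is disabled via test_values=False.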
ret, frontend_ret = helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
test_values=False,
on_device=on_device,
)
frontend_ret = np.sort(x[0], axis=axis)
assert_all_close(
ret_np=ret,
ret_from_gt_np=frontend_ret,
rtol=1e-2,
atol=1e-2,
backend=backend_fw,
ground_truth_backend="numpy",
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="squeeze",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="value_shape"),
),
axis=_squeeze_helper(),
)
def test_numpy_squeeze(
dtype_and_x,
axis,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="std",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
max_value=100,
valid_axis=True,
force_int_axis=True,
),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_std(
dtype_x_axis,
keepdims,
where,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
(
where,
input_dtypes,
method_flags,
) = np_frontend_helpers.handle_where_and_array_bools(
where=[where[0][0]] if isinstance(where, list) else where,
input_dtype=input_dtypes,
test_flags=method_flags,
)
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"data": x[0],
},
method_all_as_kwargs_np={
"axis": axis,
"out": None,
"ddof": 0,
"keepdims": keepdims,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# sum
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="sum",
dtype_x_axis_dtype=_get_castable_dtypes_values(use_where=True),
keep_dims=st.booleans(),
initial=st.one_of(st.floats(min_value=-100, max_value=100)),
)
def test_numpy_sum(
dtype_x_axis_dtype,
keep_dims,
initial,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis, dtype, where = dtype_x_axis_dtype
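    # As in test_numpy_prod, variable-mode draws are skipped on torch.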
if ivy.current_backend_str() == "torch":
assume(not method_flags.as_variable[0])
(
where,
input_dtypes,
method_flags,
) = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=method_flags,
)
where = ivy.array(where, dtype="bool")
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype,
"keepdims": keep_dims,
"initial": initial,
"where": where,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
backend_to_test=backend_fw,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="swapaxes",
dtype_x_and_axes=dtype_values_and_axes(),
)
def test_numpy_swapaxes(
dtype_x_and_axes,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
input_dtypes, x, axis1, axis2 = dtype_x_and_axes
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_all_as_kwargs_np={
"axis1": axis1,
"axis2": axis2,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# tobytes
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
),
order=st.sampled_from(["C", "F"]),
)
def test_numpy_tobytes(
dtype_x,
order,
backend_fw,
):
dtype, data, shape = dtype_x
with BackendHandler.update_backend(backend_fw) as ivy_backend:
x = ivy_backend.functional.frontends.numpy.ndarray(shape, dtype[0])
x.ivy_array = data[0]
ivy_backend.utils.assertions.check_equal(
x.tobytes(order=order), data[0].tobytes(order=order), as_array=False
)
# tofile
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="tofile",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
path=st.text(
alphabet=st.characters(whitelist_categories=("Lu", "Ll", "Nd", "Pc")),
min_size=1,
max_size=50,
),
)
def test_numpy_tofile(
dtype_and_x,
path,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"fid": path,
},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
# tolist
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="tolist",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_tolist(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
test_values=False, # Todo change this after we add __iter__ to ndarray
)
# trace
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="trace",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=2,
min_axes_size=2,
max_axes_size=2,
valid_axis=True,
),
offset=st.integers(min_value=-2, max_value=2),
)
def test_numpy_trace(
dtype_x_axis,
offset,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis1": axis[0],
"axis2": axis[1],
"offset": offset,
"out": None,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="transpose",
array_and_axes=np_frontend_helpers._array_and_axes_permute_helper(
min_num_dims=2,
max_num_dims=5,
min_dim_size=2,
max_dim_size=10,
),
)
def test_numpy_transpose(
array_and_axes,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
array, input_dtypes, axes = array_and_axes
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": np.array(array),
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axes": axes,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# var
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="var",
dtype_x_axis=_statistical_dtype_values(function="var"),
dtype=helpers.get_dtypes("valid", full=False, none=True),
where=np_frontend_helpers.where(),
keepdims=st.booleans(),
)
def test_numpy_var(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
keepdims,
where,
dtype,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": x[0],
},
method_all_as_kwargs_np={
"axis": axis,
"dtype": dtype,
"keepdims": keepdims,
"where": where,
},
frontend=frontend,
backend_to_test=backend_fw,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
rtol_=1e-2,
atol_=1e-2,
on_device=on_device,
)
# view
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="numpy.array",
method_name="view",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_numpy_view(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={},
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
frontend_method_data=frontend_method_data,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_ndarray/test_ndarray.py",
"repo_id": "ivy",
"token_count": 49490
} | 50 |
# import tensorflow
from ivy_tests.test_ivy.test_frontends import NativeClass
onnx_classes_to_ivy_classes = {}
def convtensor(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for onnx."""
if isinstance(argument, NativeClass):
return onnx_classes_to_ivy_classes.get(argument._native_class)
return argument
| ivy/ivy_tests/test_ivy/test_frontends/test_onnx/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_onnx/__init__.py",
"repo_id": "ivy",
"token_count": 122
} | 51 |
# global
from hypothesis import assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_frontends.test_torch.test_nn.test_functional.test_convolution_functions import ( # noqa: E501
_x_and_filters,
_output_shape,
)
from ivy_tests.test_ivy.test_functional.test_nn.test_layers import (
_assume_tf_dilation_gt_1,
)
# conv1d
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv1d",
dtype_vals=_x_and_filters(dim=1),
)
def test_paddle_conv1d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
)
# conv1d_transpose
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv1d_transpose",
dtype_vals=_x_and_filters(dim=1, transpose=True),
)
def test_paddle_conv1d_transpose(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals
dilations = 1 # ToDo: remove this when support for dilation > 1 is added
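    # Reject draws whose transposed-conv output shape would have a
    # non-positive dimension, since such configurations are invalid.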
assume(
all(
x > 0
for x in _output_shape(
1, dilations, strides, padding, output_pad, vals.shape, weight.shape
)
)
)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
output_padding=output_pad,
groups=fc,
dilation=dilations,
)
# conv2d
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv2d",
dtype_vals=_x_and_filters(dim=2),
)
def test_paddle_conv2d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
)
# conv2d_transpose
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv2d_transpose",
dtype_vals=_x_and_filters(dim=2, transpose=True),
)
def test_paddle_conv2d_transpose(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals
dilations = 1 # ToDo: remove this when support for dilation > 1 is added
assume(
all(
x > 0
for x in _output_shape(
2, dilations, strides, padding, output_pad, vals.shape, weight.shape
)
)
)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
output_padding=output_pad,
dilation=dilations,
groups=fc,
)
# conv3d
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv3d",
dtype_vals=_x_and_filters(dim=3),
)
def test_paddle_conv3d(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals
# ToDo: Enable gradient tests for dilations > 1 when tensorflow supports it.
_assume_tf_dilation_gt_1(backend_fw, on_device, dilations)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
dilation=dilations,
groups=fc,
)
# conv3d_transpose
@handle_frontend_test(
fn_tree="paddle.nn.functional.conv3d_transpose",
dtype_vals=_x_and_filters(dim=3, transpose=True),
)
def test_paddle_conv3d_transpose(
*,
dtype_vals,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals
dilations = 1 # ToDo: remove this when support for dilation > 1 is added
assume(
all(
x > 0
for x in _output_shape(
3, dilations, strides, padding, output_pad, vals.shape, weight.shape
)
)
)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=vals,
weight=weight,
bias=bias,
stride=strides,
padding=padding,
output_padding=output_pad,
groups=fc,
dilation=dilations,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_nn/test_functional/test_conv.py",
"repo_id": "ivy",
"token_count": 2873
} | 52 |
# global
import numpy as np
from hypothesis import assume, given
from hypothesis import strategies as st
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy.functional.frontends.paddle import Tensor
from ivy_tests.test_ivy.helpers import assert_all_close
from ivy_tests.test_ivy.helpers import handle_frontend_method, BackendHandler
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa E501
_get_dtype_values_k_axes_for_rot90,
)
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
from ivy_tests.test_ivy.test_frontends.test_torch.test_blas_and_lapack_ops import (
_get_dtype_and_3dbatch_matrices,
)
from ivy_tests.test_ivy.test_frontends.test_paddle.test_manipulation import (
_tile_helper,
)
CLASS_TREE = "ivy.functional.frontends.paddle.Tensor"
# --- Helpers --- #
# --------------- #
@st.composite
def _array_and_shape(
draw,
*,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=10,
):
if isinstance(min_dim_size, st._internal.SearchStrategy):
min_dim_size = draw(min_dim_size)
if isinstance(max_dim_size, st._internal.SearchStrategy):
max_dim_size = draw(max_dim_size)
available_dtypes = draw(helpers.get_dtypes("numeric"))
dtype = draw(
helpers.array_dtypes(
num_arrays=1,
available_dtypes=available_dtypes,
)
)
dtype.append("int32")
shape = draw(
st.shared(
helpers.get_shape(
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
),
key="shape",
)
)
array = draw(
helpers.array_values(
dtype=dtype[0],
shape=shape,
)
)
to_shape = [(None if draw(st.booleans()) else _) for _ in shape]
return dtype, [array, to_shape]
def _filter_query(query):
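    # Keep only indexing queries whose array components have more than one
    # dimension; 0-D/1-D array indices are rejected for the tests below.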
return (
query.ndim > 1
if isinstance(query, np.ndarray)
else (
not any(isinstance(i, np.ndarray) and i.ndim <= 1 for i in query)
if isinstance(query, tuple)
else True
)
)
# as_complex
@st.composite
def _get_as_complex_inputs_(draw):
shape = draw(
helpers.get_shape(
min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10
)
)
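    # paddle.as_complex expects a trailing dimension of size 2 holding the
    # (real, imaginary) parts, hence the (*shape, 2) value shape below.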
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=(*shape, 2),
min_value=0,
max_value=50,
)
)
return x_dtype, x
# clip
@st.composite
def _get_clip_inputs(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
min_value=0,
max_value=50,
)
)
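    # min and max are drawn from the disjoint ranges [0, 25] and [26, 50],
    # so min <= max always holds before one of them is possibly dropped.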
min = draw(
helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=0, max_value=25)
)
max = draw(
helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=26, max_value=50)
)
if draw(st.booleans()):
min = None
elif draw(st.booleans()):
max = None
return x_dtype, x, min, max
# clip_
@st.composite
def _get_clip_inputs_(draw):
shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10
)
)
x_dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
min_value=0,
max_value=50,
)
)
min = draw(
helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=0, max_value=25)
)
max = draw(
helpers.array_values(dtype=x_dtype[0], shape=(1,), min_value=26, max_value=50)
)
return x_dtype, x, min, max
@st.composite
def _get_dtype_and_3dbatch_matrices_for_matmul(draw):
dim_size1 = draw(helpers.ints(min_value=2, max_value=5))
dim_size2 = draw(helpers.ints(min_value=2, max_value=5))
shared_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
batch_size = draw(helpers.ints(min_value=2, max_value=4))
transpose_x = draw(st.booleans())
transpose_y = draw(st.booleans())
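    # Pre-transpose the operand shapes so that matmul with the drawn
    # transpose_x/transpose_y flags always sees conforming dimensions.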
mat1_shape = (
(batch_size, dim_size1, shared_size)
if not transpose_x
else (batch_size, shared_size, dim_size1)
)
mat2_shape = (
(batch_size, shared_size, dim_size2)
if not transpose_y
else (batch_size, dim_size2, shared_size)
)
mat1 = draw(
helpers.array_values(dtype=dtype[0], shape=mat1_shape, min_value=2, max_value=5)
)
mat2 = draw(
helpers.array_values(dtype=dtype[0], shape=mat2_shape, min_value=2, max_value=5)
)
return dtype, mat1, mat2, transpose_x, transpose_y
# cond
@st.composite
def _get_dtype_and_matrix_non_singular(draw, dtypes):
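    # Rejection-sample square matrices until one with a nonzero determinant
    # (i.e. a non-singular matrix) is drawn.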
while True:
matrix = draw(
helpers.dtype_and_values(
available_dtypes=dtypes,
min_value=-10,
max_value=10,
min_num_dims=2,
max_num_dims=2,
min_dim_size=1,
max_dim_size=5,
shape=st.tuples(st.integers(1, 5), st.integers(1, 5)).filter(
lambda x: x[0] == x[1]
),
allow_inf=False,
allow_nan=False,
)
)
if np.linalg.det(matrix[1][0]) != 0:
break
return matrix[0], matrix[1]
@st.composite
def _get_dtype_and_multiplicative_matrices(draw):
return draw(
st.one_of(
_get_dtype_input_and_matrices_for_matmul(),
_get_dtype_and_3dbatch_matrices_for_matmul(),
)
)
@st.composite
def _get_dtype_and_square_matrix(draw):
dim_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
mat = draw(
helpers.array_values(
dtype=dtype[0], shape=(dim_size, dim_size), min_value=0, max_value=10
)
)
return dtype, mat
# bmm helper function
@st.composite
def _get_dtype_and_values_bmm(draw):
# arrays x and y of sizes (b, m, k) and (b, k, n) respectively
b = draw(helpers.ints(min_value=1, max_value=10))
k = draw(helpers.ints(min_value=1, max_value=10))
m = draw(helpers.ints(min_value=1, max_value=10))
n = draw(helpers.ints(min_value=1, max_value=10))
dtype = draw(helpers.get_dtypes("float", index=1, full=False))
x = draw(
helpers.array_values(
dtype=dtype[0], shape=(b, m, k), min_value=-10, max_value=10
)
)
y = draw(
helpers.array_values(
dtype=dtype[0], shape=(b, k, n), min_value=-10, max_value=10
)
)
return dtype, x, y
# lerp helper function
@st.composite
def _get_dtype_and_values_for_lerp(draw):
is_tensor = draw(st.booleans())
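    # Draw the lerp weight either as a third tensor or as a plain Python
    # float, covering both call signatures.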
if is_tensor:
input_dtype, x = draw(
helpers.dtype_and_values(
num_arrays=3,
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
)
)
return input_dtype, x[0], x[1], x[2]
else:
input_dtype, x = draw(
helpers.dtype_and_values(
num_arrays=2,
available_dtypes=helpers.get_dtypes("valid"),
shared_dtype=True,
)
)
weight = draw(st.floats())
return input_dtype, x[0], x[1], weight
@st.composite
def _get_dtype_input_and_matrices_for_matmul(draw):
dim_size1 = draw(helpers.ints(min_value=2, max_value=5))
dim_size2 = draw(helpers.ints(min_value=2, max_value=5))
shared_size = draw(helpers.ints(min_value=2, max_value=5))
dtype = draw(helpers.get_dtypes("float", full=True))
dtype = [
draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"}))))
]
transpose_x = draw(st.booleans())
transpose_y = draw(st.booleans())
mat1_shape = (shared_size, dim_size1) if transpose_x else (dim_size1, shared_size)
mat2_shape = (dim_size2, shared_size) if transpose_y else (shared_size, dim_size2)
mat1 = draw(
helpers.array_values(dtype=dtype[0], shape=mat1_shape, min_value=2, max_value=5)
)
mat2 = draw(
helpers.array_values(dtype=dtype[0], shape=mat2_shape, min_value=2, max_value=5)
)
return dtype, mat1, mat2, transpose_x, transpose_y
@st.composite
def _get_dtype_value1_value2_cov(
draw,
available_dtypes,
min_num_dims,
max_num_dims,
min_dim_size,
max_dim_size,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
dtype = draw(st.sampled_from(draw(available_dtypes)))
    values = []
    # Only a single value array is needed here; the loop is kept for symmetry
    # with similar helpers that draw several arrays.
    for i in range(1):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value = values[0]
# modifiers: rowVar, bias, ddof
rowVar = draw(st.booleans())
ddof = draw(st.booleans())
    # Index into ``shape`` selecting the axis whose length the fweights and
    # aweights must match: axis 0 when rowVar is False or the array is 1-D,
    # otherwise the last axis.
    if rowVar is False:
        numVals = 0
    else:
        numVals = 0 if len(shape) == 1 else -1
fweights = draw(
helpers.array_values(
dtype="int64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
)
)
aweights = draw(
helpers.array_values(
dtype="float64",
shape=shape[numVals],
abs_smallest_val=1,
min_value=1,
max_value=10,
allow_inf=False,
small_abs_safety_factor=1,
)
)
return [dtype], value, rowVar, ddof, fweights, aweights
@st.composite
def _reshape_helper(draw):
# generate a shape s.t len(shape) > 0
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
)
)
reshape_shape = draw(helpers.reshape_shapes(shape=shape))
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape,
)
)
return dtypes, x, reshape_shape
# expand helper function
@st.composite
def dtypes_x_shape(draw):
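    # The value and the target shape are drawn from the same shared strategy
    # (key="shape"), so expand receives a shape compatible with its input.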
dtypes, x = draw(
helpers.dtype_and_values(
min_dim_size=1,
min_num_dims=1,
available_dtypes=["float32"],
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=6,
),
key="shape",
),
)
)
shape = draw(
st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=6,
),
key="shape",
)
)
return dtypes, x, shape
# --- Main --- #
# ------------ #
# __add__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__add__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___add__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__float__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
),
)
def test_paddle___float__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
# Numpy doesn't support complex to float conversion
assume(not np.issubdtype(input_dtypes[0], np.complexfloating))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__floordiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
),
)
def test_paddle___floordiv__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"other": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
atol_=1,
)
# __ge__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__ge__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___ge__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __getitem__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__getitem__",
dtype_x_index=helpers.dtype_array_query(
available_dtypes=helpers.get_dtypes("valid"),
allow_neg_step=False,
).filter(lambda x: x[0][0] == x[0][-1] and _filter_query(x[-2])),
)
def test_paddle___getitem__(
dtype_x_index,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, index = dtype_x_index
helpers.test_frontend_method(
init_input_dtypes=[input_dtype[0]],
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x},
method_input_dtypes=[*input_dtype[1:]],
method_all_as_kwargs_np={"item": index},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __gt__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__gt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___gt__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__int__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=-1e15,
max_value=1e15,
),
)
def test_paddle___int__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
# Numpy doesn't support complex to int conversion
assume(not np.issubdtype(input_dtypes[0], np.complexfloating))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# invert
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__invert__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
max_num_dims=0,
),
)
def test_paddle___invert__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __le__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__le__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___le__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __len__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__len__",
dtype_and_x=_array_and_shape(
min_num_dims=1,
max_num_dims=5,
),
)
def test_paddle___len__(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# long
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__long__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
max_num_dims=0,
min_value=-1e15,
max_value=1e15,
),
)
def test_paddle___long__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
frontend,
on_device,
):
input_dtypes, xs = dtype_and_x
# Numpy doesn't support complex to int conversion
assume(not np.issubdtype(input_dtypes[0], np.complexfloating))
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
method_input_dtypes=input_dtypes,
init_all_as_kwargs_np={
"object": xs[0],
},
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __lt__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__lt__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___lt__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __mul__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__mul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___mul__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __ne__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__ne__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_paddle___ne__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"other": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __neg__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__neg__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_paddle___neg__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __or__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__or__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___or__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __radd__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__radd__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___radd__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"x": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__rdiv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
)
def test_paddle___rdiv__(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __rmul__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__rmul__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_paddle___rmul__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"other": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __rsub__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__rsub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle___rsub__(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"x": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__rtruediv__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=10,
small_abs_safety_factor=10,
safety_factor_scale="log",
),
)
def test_paddle___rtruediv__(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# __setitem__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__setitem__",
dtypes_x_index_val=helpers.dtype_array_query_val(
available_dtypes=helpers.get_dtypes("valid"),
).filter(lambda x: x[0][0] == x[0][-1] and _filter_query(x[-2])),
)
def test_paddle___setitem__(
dtypes_x_index_val,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, index, val = dtypes_x_index_val
helpers.test_frontend_method(
init_input_dtypes=[input_dtype[0]],
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x},
method_input_dtypes=[*input_dtype[1:]],
method_all_as_kwargs_np={"item": index, "value": val},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __sub__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__sub__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle___sub__(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# __xor__
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="__xor__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle___xor__(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# abs
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="abs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_abs(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# acosh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_acosh(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# add_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="add_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
test_inplace=st.just(True),
)
def test_paddle_add_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="add_n",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=helpers.ints(min_value=1, max_value=5),
shared_dtype=True,
),
)
def test_paddle_add_n(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"inputs": x},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"inputs": x},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# addmm
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="addmm",
dtype_input_xy=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True),
beta=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
alpha=st.floats(
min_value=-5,
max_value=5,
allow_nan=False,
allow_subnormal=False,
allow_infinity=False,
),
)
def test_paddle_addmm(
*,
dtype_input_xy,
beta,
alpha,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, input, x, y = dtype_input_xy
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": input[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"x": x[0], "y": y[0], "beta": beta, "alpha": alpha},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# all
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="all",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("bool"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
)
def test_paddle_all(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# allclose
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="allclose",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
# rtol=1e-05,
# atol=1e-08,
# equal_nan=st.booleans(),
)
def test_paddle_allclose(
dtype_and_x,
# rtol,
# atol,
# equal_nan,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"other": x[1],
# "rtol": rtol,
# "atol": atol,
# "equal_nan": equal_nan,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="angle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float64", "complex64", "complex128"],
),
)
def test_paddle_angle(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# any
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="any",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("float")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
)
def test_paddle_any(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# argmax
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="argmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("float")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
)
def test_paddle_argmax(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# argmin
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="argmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("valid")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
)
def test_paddle_argmin(
dtype_x_axis,
keep_dims,
on_device,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
frontend,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# argsort
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="argsort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("float")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
descending=st.booleans(),
)
def test_paddle_argsort(
dtype_x_axis,
descending,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"descending": descending,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# as_complex
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="as_complex",
dtypes_and_x=_get_as_complex_inputs_(),
)
def test_paddle_as_complex(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# as_real
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="as_real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
),
)
def test_paddle_as_real(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# asin
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_asin(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# asinh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="asinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_asinh(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# astype
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="astype",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
dtype=st.one_of(helpers.get_dtypes("valid")),
)
def test_paddle_astype(
dtype_and_x,
dtype,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
if dtype is None:
dtype = input_dtype
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"dtype": dtype,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# atan
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="atan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_atan(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
backend_to_test=backend_fw,
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# bitwise_and
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="bitwise_and",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_bitwise_and(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# bitwise_not
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="bitwise_not",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_bitwise_not(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="bitwise_or",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_bitwise_or(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# bitwise_xor
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="bitwise_xor",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_bitwise_xor(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# bmm
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="bmm",
dtype_and_x=_get_dtype_and_values_bmm(),
)
def test_paddle_bmm(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, y = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": y},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# cast
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cast",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_paddle_cast(
dtype_and_x,
dtype,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
if dtype is None:
dtype = input_dtype
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"dtype": dtype[0],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# ceil
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_ceil(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# ceil_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="ceil_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_inplace=st.just(True),
)
def test_paddle_ceil_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# cholesky
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cholesky",
dtype_and_x=_get_dtype_and_square_matrix(),
upper=st.booleans(),
)
def test_paddle_cholesky(
dtype_and_x,
upper,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
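    # Make the matrix symmetric positive-definite so that a Cholesky
    # factorization is guaranteed to exist.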
x = np.matmul(x.T, x) + np.identity(x.shape[0])
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"upper": upper},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="clip",
input_and_ranges=_get_clip_inputs(),
)
def test_paddle_clip(
input_and_ranges,
frontend,
frontend_method_data,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x, min, max = input_and_ranges
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"min": min, "max": max},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
# clip_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="clip_",
input_and_ranges=_get_clip_inputs_(),
test_inplace=st.just(True),
)
def test_paddle_clip_(
input_and_ranges,
frontend,
frontend_method_data,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, x, min_val, max_val = input_and_ranges
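    # _get_clip_inputs_ already draws min_val < max_val; the swap below is
    # purely defensive in case those ranges ever change.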
if min_val > max_val:
max_value = min_val
min_value = max_val
else:
max_value = max_val
min_value = min_val
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"min": min_value, "max": max_value},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cond",
dtype_and_x=_get_dtype_and_matrix_non_singular(dtypes=["float32", "float64"]),
p=st.sampled_from([None, "fro", "nuc", np.inf, -np.inf, 1, -1, 2, -2]),
)
def test_paddle_cond(
dtype_and_x,
p,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"p": p},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# conj
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="conj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_paddle_conj(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# cos
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_cos(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# cosh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_cosh(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
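# cumprod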
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
)
def test_paddle_cumprod(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"dim": axis},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
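# cumsum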
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cumsum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
)
def test_paddle_cumsum(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"axis": axis},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# deg2rad
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="deg2rad",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_deg2rad(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# device
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_paddle_device(
dtype_x,
):
_, data = dtype_x
x = Tensor(data[0])
x.ivy_array = data[0]
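    # Tensor.place should match the device of the underlying ivy array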
ivy.utils.assertions.check_equal(
x.place, ivy.dev(ivy.array(data[0])), as_array=False
)
# diagonal
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="diagonal",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
dims_and_offset=helpers.dims_and_offset(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape")
),
)
def test_paddle_diagonal(
dtype_and_values,
dims_and_offset,
frontend,
frontend_method_data,
backend_fw,
init_flags,
method_flags,
on_device,
):
input_dtype, value = dtype_and_values
dim1, dim2, offset = dims_and_offset
    value_arr = value[0]
    num_dims = len(np.shape(value_arr))
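    # the two diagonal axes must refer to different dimensions, even after
    # negative indices are normalized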
assume(dim1 != dim2)
if dim1 < 0:
assume(dim1 + num_dims != dim2)
if dim2 < 0:
assume(dim1 != dim2 + num_dims)
helpers.test_frontend_method(
init_input_dtypes=[input_dtype[0]],
init_all_as_kwargs_np={"x": input},
method_input_dtypes=[input_dtype[0]],
method_all_as_kwargs_np={
"offset": offset,
"axis1": dim1,
"axis2": dim2,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
backend_to_test=backend_fw,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# digamma
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="digamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=1,
max_value=1e5,
),
)
def test_paddle_digamma(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# dim
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="dim",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_dim(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
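        # dim takes no arguments, so no method dtypes are passed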
method_input_dtypes=[],
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# divide
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="divide",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
safety_factor_scale="log",
small_abs_safety_factor=32,
),
)
def test_paddle_divide(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
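# dtype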
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_paddle_dtype(
dtype_x,
):
dtype, data = dtype_x
x = Tensor(data[0])
x.ivy_array = data[0]
ivy.utils.assertions.check_equal(x.dtype, dtype[0], as_array=False)
# eigvals
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="eigvals",
dtype_and_x=_get_dtype_and_square_matrix(),
)
def test_paddle_eigvals(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
ret, frontend_ret = helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
test_values=False,
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
# check if Tensor or ivy array
try:
ret = ret.ivy_array.to_numpy()
except AttributeError:
ret = ivy_backend.to_numpy(ret)
frontend_ret = [np.asarray(x) for x in frontend_ret]
# Calculate the magnitude of the complex numbers then sort them for testing
ret = np.sort(np.abs(ret)).astype(np.float64)
frontend_ret = np.sort(np.abs(frontend_ret)).astype(np.float64)
assert_all_close(
ret_np=ret,
ret_from_gt_np=frontend_ret,
backend=backend_fw,
ground_truth_backend=frontend,
atol=1e-2,
rtol=1e-2,
)
# equal
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="equal",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_equal(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# equal_all
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="equal_all",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=-np.inf,
max_value=np.inf,
shared_dtype=True,
safety_factor_scale="log",
small_abs_safety_factor=32,
),
)
def test_paddle_equal_all(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# erf
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="erf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_erf(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# exp
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="exp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_exp(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# exp_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="exp_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_inplace=st.just(True),
)
def test_paddle_exp_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# fill_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="fill_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
allow_inf=False,
),
dtype_v=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=(1,),
min_value=0,
max_value=10,
),
)
def test_paddle_fill_(
dtype_and_x,
dtype_v,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
value_dtype, v = dtype_v
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=value_dtype,
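        # fill_ expects a Python scalar, so unwrap the one-element array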
method_all_as_kwargs_np={"value": v[0].item()},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# floor
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_floor(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# floor_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="floor_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_inplace=st.just(True),
)
def test_paddle_floor_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# floor_divide
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="floor_divide",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=2,
shared_dtype=True,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
)
def test_paddle_floor_divide(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
    # an absolute tolerance of 1 allows for off-by-one floor_divide results
    # near exact integer quotients
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
atol_=1,
)
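# fmax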
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="fmax",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_fmax(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
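# fmin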
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="fmin",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_fmin(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# frac
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="frac",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="valid"),
num_arrays=1,
max_value=1e6,
min_value=-1e6,
),
)
def test_paddle_frac(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# gather
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="gather",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_gather(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# greater_than
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="greater_than",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
safety_factor_scale="log",
small_abs_safety_factor=32,
),
)
def test_paddle_greater_than(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# imag
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_imag(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# inner
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="inner",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_value=-10,
max_value=10,
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_inner(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# is_complex
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="is_complex",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_is_complex(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# is_floating_point
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="is_floating_point",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["int16", "int32", "int64", "float32", "float64"],
),
)
def test_paddle_is_floating_point(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# is_tensor
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="is_tensor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
),
)
def test_paddle_is_tensor(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# isclose
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="isclose",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_isclose(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# isfinite
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="isfinite",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_isfinite(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# isinf
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="isinf",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_isinf(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# isnan
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="isnan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_isnan(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# lerp
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="lerp",
dtypes_and_x=_get_dtype_and_values_for_lerp(),
)
def test_paddle_lerp(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, y, weight = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": y,
"weight": weight,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# lerp_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="lerp_",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=3, shared_dtype=True
),
test_inplace=st.just(True),
)
def test_paddle_lerp_(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
"weight": x[2],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# less_equal
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="less_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_less_equal(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# less_than
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="less_than",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_less_than(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# log
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_log(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# log10
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="log10",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_log10(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# logical_and
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="logical_and",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_logical_and(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"self": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# logical_not
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="logical_not",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_logical_not(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# logical_or
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="logical_or",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_logical_or(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# logical_xor
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="logical_xor",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_logical_xor(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# max
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="max",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("valid")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=False,
),
keep_dims=st.booleans(),
)
def test_paddle_max(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# mean
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="mean",
dtype_and_x=_statistical_dtype_values(function="mean"),
keepdim=st.booleans(),
)
def test_paddle_mean(
dtype_and_x,
keepdim,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keepdim,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
backend_to_test=backend_fw,
method_flags=method_flags,
on_device=on_device,
)
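# minimum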
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="minimum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_minimum(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
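# mod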
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
min_value=0,
exclude_min=True,
),
)
def test_paddle_mod(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
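# multiply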
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="multiply",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_multiply(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
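# ndim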
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_paddle_ndim(
dtype_x,
):
_, data = dtype_x
x = Tensor(data[0])
ivy.utils.assertions.check_equal(x.ndim, data[0].ndim, as_array=False)
# neg
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="neg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
)
def test_paddle_neg(
dtype_and_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# nonzero
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="nonzero",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="float"),
min_num_dims=1,
allow_inf=True,
),
)
def test_paddle_nonzero(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# not_equal
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="not_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="valid"),
num_arrays=2,
shared_dtype=True,
),
)
def test_paddle_not_equal(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"x": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
)
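# numel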
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="numel",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
),
)
def test_paddle_numel(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# numpy
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="numpy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
min_dim_size=2,
),
)
def test_paddle_numpy(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=[],
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# pow
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="pow",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
allow_inf=False,
shared_dtype=True,
),
)
def test_paddle_pow(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# prod
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="prod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_value=-5,
max_value=5,
valid_axis=True,
force_int_axis=True,
allow_inf=False,
),
keep_dims=st.booleans(),
)
def test_paddle_prod(
dtype_x_axis,
keep_dims,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
"keepdim": keep_dims,
"dtype": x[0].dtype,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# rad2deg
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="rad2deg",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_rad2deg(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
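# real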
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(kind="valid"),
num_arrays=2,
min_num_dims=1,
allow_inf=True,
),
)
def test_paddle_real(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# reciprocal
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_reciprocal(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# reciprocal_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="reciprocal_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_reciprocal_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# remainder
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="remainder",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_remainder(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# remainder_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="remainder_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_inplace=st.just(True),
)
def test_paddle_remainder_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"value": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# reshape
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="reshape",
dtype_x_shape=_reshape_helper(),
)
def test_paddle_reshape(
dtype_x_shape,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, shape = dtype_x_shape
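    # an empty target shape is not a valid reshape, so skip such draws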
assume(len(shape) != 0)
shape = {
"shape": shape,
}
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np=shape,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# reshape_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="reshape_",
dtype_x_shape=_reshape_helper(),
)
def test_paddle_reshape_(
dtype_x_shape,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, shape = dtype_x_shape
assume(len(shape) != 0)
shape = {
"shape": shape,
}
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np=shape,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# rot90
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="rot90",
dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=3,
max_num_dims=6,
min_dim_size=1,
max_dim_size=10,
),
)
def test_paddle_rot90(
dtype_m_k_axes,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, values, k, axes = dtype_m_k_axes
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": values,
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"k": k,
"axes": axes,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# round_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="round_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_round_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# rsqrt
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_rsqrt(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# rsqrt_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="rsqrt_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_inplace=st.just(True),
)
def test_paddle_rsqrt_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
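# shape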
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_paddle_shape(dtype_x):
_, data, shape = dtype_x
x = Tensor(data[0])
ivy.utils.assertions.check_equal(
x.ivy_array.shape, ivy.Shape(shape), as_array=False
)
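# sign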
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sign(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# sin
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sin(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# sinh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_sinh(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# sort
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=st.one_of(helpers.get_dtypes("float")),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
descending=st.booleans(),
)
def test_paddle_sort(
dtype_x_axis,
descending,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"object": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
"descending": descending,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# split
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="split",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_split(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# sqrt
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_sqrt(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# sqrt_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="sqrt_",
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_inplace=st.just(True),
)
def test_paddle_sqrt_(
dtype_x,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
input_dtype, x = dtype_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# square
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="square",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_square(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# squeeze_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="squeeze_",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
test_inplace=st.just(True),
)
def test_paddle_squeeze_(
dtype_value,
axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_value
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# stanh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="stanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
scale_a=st.floats(1e-5, 1e5),
scale_b=st.floats(1e-5, 1e5),
)
def test_paddle_stanh(
dtype_and_x,
frontend_method_data,
scale_a,
scale_b,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"scale_a": scale_a,
"scale_b": scale_b,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# std
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="std",
dtype_and_x=_statistical_dtype_values(function="std"),
keepdim=st.booleans(),
)
def test_paddle_std(
dtype_and_x,
keepdim,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
"unbiased": bool(correction),
"keepdim": keepdim,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
backend_to_test=backend_fw,
method_flags=method_flags,
on_device=on_device,
)
# subtract
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="subtract",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_subtract(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# subtract_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="subtract_",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
test_inplace=st.just(True),
)
def test_paddle_subtract_(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# t
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="t",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
max_num_dims=2,
),
)
def test_paddle_t(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
backend_to_test=backend_fw,
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# tanh
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_tanh(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# tanh_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="tanh_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_tanh_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
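# acos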
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_paddle_tensor_acos(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# add
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True
),
)
def test_paddle_tensor_add(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# chunk
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="chunk",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
min_value=0.1,
max_value=1e02,
force_int_axis=True,
valid_axis=True,
),
chunks=st.integers(
min_value=1,
max_value=8,
),
)
def test_paddle_tensor_chunk(
dtype_x_axis,
chunks,
frontend,
frontend_method_data,
init_flags,
method_flags,
on_device,
backend_fw,
):
    input_dtype, x, axis = dtype_x_axis
    # paddle.chunk splits into equal-sized pieces, so discard draws where the
    # target axis is not evenly divisible by `chunks`, or where a negative
    # axis falls out of range.
    is_remainder = x[0].shape[axis] % chunks != 0
    axis_out_of_range = len(x[0].shape) + axis < 0
    if is_remainder or axis_out_of_range:
        assume(False)
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"chunks": chunks,
"axis": axis,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# cov
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="cov",
dtype_x1_corr_cov=_get_dtype_value1_value2_cov(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=2,
min_dim_size=2,
max_dim_size=5,
min_value=1,
max_value=1e10,
abs_smallest_val=0.01,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_paddle_tensor_cov(
dtype_x1_corr_cov,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
dtype, x, rowvar, ddof, fweights, aweights = dtype_x1_corr_cov
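    # the dtype lists below are pinned rather than taken from the drawn dtype,
    # presumably because paddle's cov expects integer fweights and float
    # aweights regardless of the data dtype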
helpers.test_frontend_method(
init_input_dtypes=["float64", "int64", "float64"],
init_all_as_kwargs_np={
"data": x,
},
method_input_dtypes=["int64", "float64"],
backend_to_test=backend_fw,
method_all_as_kwargs_np={
"rowvar": rowvar,
"ddof": ddof,
"fweights": fweights,
"aweights": aweights,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
rtol_=1e-3,
atol_=1e-3,
frontend=frontend,
on_device=on_device,
)
# expand
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="expand",
dtype_x_shape=dtypes_x_shape(),
)
def test_paddle_tensor_expand(
dtype_x_shape,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, shape = dtype_x_shape
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"shape": shape,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# flatten
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="flatten",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axes=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
min_size=2,
max_size=2,
unique=False,
force_tuple=True,
),
)
def test_paddle_tensor_flatten(
dtype_value,
axes,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_value
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"start_axis": axes[0],
"stop_axis": axes[1],
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# floor_mod
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="floor_mod",
dtypes_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
min_value=2,
shared_dtype=True,
),
)
def test_paddle_tensor_floor_mod(
dtypes_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtypes_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"y": x[1]},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
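# heaviside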
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="heaviside",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
allow_inf=False,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
shared_dtype=True,
),
)
def test_paddle_tensor_heaviside(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"x": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"y": x[1],
},
init_flags=init_flags,
method_flags=method_flags,
frontend_method_data=frontend_method_data,
frontend=frontend,
on_device=on_device,
)
# matmul
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="matmul",
dtype_tensor1_tensor2=_get_dtype_and_multiplicative_matrices(),
)
def test_paddle_tensor_matmul(
dtype_tensor1_tensor2,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
dtype, tensor1, tensor2, transpose_x, transpose_y = dtype_tensor1_tensor2
helpers.test_frontend_method(
init_input_dtypes=dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": tensor1,
},
method_input_dtypes=dtype,
method_all_as_kwargs_np={
"y": tensor2,
"transpose_x": transpose_x,
"transpose_y": transpose_y,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# squeeze
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="squeeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
)
def test_paddle_tensor_squeeze(
dtype_value,
axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_value
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# tile
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="tile",
dt_x_repeats=_tile_helper(),
)
def test_paddle_tensor_tile(
dt_x_repeats,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtypes, x, repeats = dt_x_repeats
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"repeat_times": repeats,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
backend_to_test=backend_fw,
method_flags=method_flags,
on_device=on_device,
)
# topk
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="topk",
dtype_x_and_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
k=st.data(),
sorted=st.booleans(),
largest=st.booleans(),
)
def test_paddle_topk(
dtype_x_and_axis,
k,
sorted,
largest,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_x_and_axis
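    # draw k at run time so it can be bounded by the size of the drawn axis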
k = k.draw(st.integers(min_value=1, max_value=x[0].shape[axis]))
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"k": k,
"axis": axis,
"largest": largest,
"sorted": sorted,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
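        # value comparison is skipped, likely because the indices of tied
        # values can legitimately differ between backends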
test_values=False,
)
# trace
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="trace",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=2,
min_value=-1e04,
max_value=1e04,
allow_inf=False,
),
    offset=st.integers(min_value=-10000, max_value=10000),
axis1=st.integers(min_value=0, max_value=0),
axis2=st.integers(min_value=1, max_value=1),
)
def test_paddle_trace(
dtype_and_x,
offset,
axis1,
axis2,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
        init_all_as_kwargs_np={
            "data": x[0],
        },
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"offset": offset,
"axis1": axis1,
"axis2": axis2,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
backend_to_test=backend_fw,
on_device=on_device,
)
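# trunc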
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="trunc",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_paddle_trunc(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
backend_to_test=backend_fw,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# unbind
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="unbind",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=2,
max_dim_size=1,
force_int_axis=True,
min_axis=-1,
max_axis=0,
),
)
def test_paddle_unbind(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# unique_consecutive
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="unique_consecutive",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
max_num_dims=4,
max_dim_size=1,
force_int_axis=True,
min_axis=-1,
max_axis=0,
),
)
def test_paddle_unique_consecutive(
dtype_x_axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtypes, x, axis = dtype_x_axis
helpers.test_frontend_method(
init_input_dtypes=input_dtypes,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtypes,
method_all_as_kwargs_np={
"axis": axis,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
on_device=on_device,
)
# unsqueeze
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="unsqueeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
)
def test_paddle_unsqueeze(
dtype_value,
axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_value
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# unsqueeze_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="unsqueeze_",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
axis=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
test_inplace=st.just(True),
)
def test_paddle_unsqueeze_(
dtype_value,
axis,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_value
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
# var
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="var",
dtype_and_x=_statistical_dtype_values(function="var"),
keepdim=st.booleans(),
)
def test_paddle_var(
dtype_and_x,
keepdim,
frontend,
backend_fw,
frontend_method_data,
init_flags,
method_flags,
on_device,
):
input_dtype, x, axis, correction = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
init_all_as_kwargs_np={"data": x[0]},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={
"axis": axis,
"unbiased": bool(correction),
"keepdim": keepdim,
},
frontend=frontend,
frontend_method_data=frontend_method_data,
init_flags=init_flags,
backend_to_test=backend_fw,
method_flags=method_flags,
on_device=on_device,
)
# zero_
@handle_frontend_method(
class_tree=CLASS_TREE,
init_tree="paddle.to_tensor",
method_name="zero_",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
allow_inf=False,
),
test_inplace=st.just(True),
)
def test_paddle_zero_(
dtype_and_x,
frontend_method_data,
init_flags,
method_flags,
frontend,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_method(
init_input_dtypes=input_dtype,
backend_to_test=backend_fw,
init_all_as_kwargs_np={
"data": x[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={},
frontend_method_data=frontend_method_data,
init_flags=init_flags,
method_flags=method_flags,
frontend=frontend,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py",
"repo_id": "ivy",
"token_count": 82173
} | 53 |
# global
import ivy
import numpy as np
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
# fmt: off
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_sorting \
import (
_invert_permutation_helper,
)
# fmt: on
from ivy_tests.test_ivy.helpers import handle_frontend_test
# abs
@handle_frontend_test(
fn_tree="tensorflow.math.abs",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=25,
small_abs_safety_factor=25,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_abs(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=x[0],
)
# accumulate_n
@handle_frontend_test(
fn_tree="tensorflow.math.accumulate_n",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=(ivy.int64,),
num_arrays=helpers.ints(min_value=2, max_value=5),
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_accumulate_n(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
inputs=x,
)
# acos
@handle_frontend_test(
fn_tree="tensorflow.math.acos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1,
max_value=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_acos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# acosh
@handle_frontend_test(
fn_tree="tensorflow.math.acosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
small_abs_safety_factor=3,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_acosh(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=x[0],
)
# add
@handle_frontend_test(
fn_tree="tensorflow.math.add",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_add(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# add_n
@handle_frontend_test(
fn_tree="tensorflow.math.add_n",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=helpers.ints(min_value=1, max_value=5),
shared_dtype=True,
),
)
def test_tensorflow_add_n(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
inputs=x,
)
# angle
@handle_frontend_test(
fn_tree="tensorflow.math.angle",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=["float64", "complex64", "complex128"],
),
)
def test_tensorflow_angle(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# argmax
@handle_frontend_test(
fn_tree="tensorflow.math.argmax",
dtype_and_x=_statistical_dtype_values(function="argmax"),
output_type=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_argmax(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
output_type,
):
input_dtype, x, axis, *_ = dtype_and_x
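    # the shared statistical strategy can return the axis as a tuple, but
    # tf.math.argmax expects a single integer axis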
if isinstance(axis, tuple):
axis = axis[0]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
output_type=output_type,
)
# argmin
@handle_frontend_test(
fn_tree="tensorflow.math.argmin",
dtype_and_x=_statistical_dtype_values(function="argmin"),
output_type=st.sampled_from(["int32", "int64"]),
test_with_out=st.just(False),
)
def test_tensorflow_argmin(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
output_type,
):
input_dtype, x, axis, *_ = dtype_and_x
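    # normalize a tuple axis to a single integer, as in the argmax test above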
if isinstance(axis, tuple):
axis = axis[0]
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
axis=axis,
output_type=output_type,
)
# asin
@handle_frontend_test(
fn_tree="tensorflow.math.asin",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-1,
max_value=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_asin(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# asinh
@handle_frontend_test(
fn_tree="tensorflow.math.asinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_asinh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# atan
@handle_frontend_test(
fn_tree="tensorflow.math.atan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_atan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# atan2
@handle_frontend_test(
fn_tree="tensorflow.math.atan2",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True
),
test_with_out=st.just(False),
)
def test_tensorflow_atan2(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
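    # skip draws where the denominator is near zero, where atan2 is
    # numerically unstable and results can diverge across backends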
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y=x[0],
x=x[1],
)
# atanh
@handle_frontend_test(
fn_tree="tensorflow.math.atanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
test_with_out=st.just(False),
)
def test_tensorflow_atanh(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# bessel_i1
@handle_frontend_test(
fn_tree="tensorflow.math.bessel_i1",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=1,
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=4,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_bessel_i1(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# bincount
@handle_frontend_test(
fn_tree="tensorflow.math.bincount",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=1,
max_value=2,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
),
key="a_s_d",
),
),
test_with_out=st.just(False),
)
def test_tensorflow_bincount(
*,
dtype_and_x,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
arr=x[0],
weights=None,
minlength=0,
)
# ceil
@handle_frontend_test(
fn_tree="tensorflow.math.ceil",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_ceil(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# confusion_matrix
@handle_frontend_test(
fn_tree="tensorflow.math.confusion_matrix",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
min_value=0,
max_value=4,
shared_dtype=True,
),
num_classes=st.integers(min_value=5, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_confusion_matrix(
*,
dtype_and_x,
num_classes,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
labels=x[0],
predictions=x[1],
num_classes=num_classes,
)
# conj
@handle_frontend_test(
fn_tree="tensorflow.math.conj",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
)
def test_tensorflow_conj(
*,
dtype_and_input,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
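# cos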
@handle_frontend_test(
fn_tree="tensorflow.math.cos",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_cos(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# cosh
@handle_frontend_test(
fn_tree="tensorflow.math.cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
test_with_out=st.just(False),
)
def test_tensorflow_cosh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# count_nonzero
@handle_frontend_test(
fn_tree="tensorflow.math.count_nonzero",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
allow_neg_axes=False,
),
keepdims=st.booleans(),
dtype=helpers.get_dtypes("numeric", full=False),
test_with_out=st.just(False),
)
def test_tensorflow_count_nonzero(
*,
dtype_x_axis,
dtype,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
axis=axis,
keepdims=keepdims,
dtype=dtype[0],
)
# cumprod
@handle_frontend_test(
fn_tree="tensorflow.math.cumprod",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
exclusive=st.booleans(),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_cumprod( # NOQA
*,
dtype_x_axis,
exclusive,
reverse,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
# cumsum
@handle_frontend_test(
fn_tree="tensorflow.math.cumsum",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
valid_axis=True,
force_int_axis=True,
min_num_dims=1,
min_value=-5,
max_value=5,
),
exclusive=st.booleans(),
reverse=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_cumsum( # NOQA
*,
dtype_x_axis,
exclusive,
reverse,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
axis=axis,
exclusive=exclusive,
reverse=reverse,
)
# digamma
@handle_frontend_test(
fn_tree="tensorflow.math.digamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_digamma(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# divide
@handle_frontend_test(
fn_tree="tensorflow.math.divide",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_divide(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# divide_no_nan
@handle_frontend_test(
fn_tree="tensorflow.math.divide_no_nan",
dtype_and_x=helpers.dtype_and_values(
num_arrays=2,
available_dtypes=helpers.get_dtypes("float"),
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_divide_no_nan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xy = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xy[0],
y=xy[1],
)
# equal
@handle_frontend_test(
fn_tree="tensorflow.math.equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_equal(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# erfcinv
@handle_frontend_test(
fn_tree="tensorflow.math.erfcinv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_erfcinv(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# exp
@handle_frontend_test(
fn_tree="tensorflow.math.exp",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_exp(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# expm1
@handle_frontend_test(
fn_tree="tensorflow.math.expm1",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_expm1(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floor
@handle_frontend_test(
fn_tree="tensorflow.math.floor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_floor(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# floormod
@handle_frontend_test(
fn_tree="tensorflow.math.floormod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_floormod(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
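    # discard near-zero operands, since modulo with values close to zero is
    # numerically ill-conditioned across backends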
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# greater
@handle_frontend_test(
fn_tree="tensorflow.math.greater",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_greater(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# greater_equal
@handle_frontend_test(
fn_tree="tensorflow.math.greater_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_greater_equal(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
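# igamma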
@handle_frontend_test(
fn_tree="tensorflow.math.igamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
abs_smallest_val=1e-5,
min_num_dims=2,
max_num_dims=2,
min_dim_size=3,
max_dim_size=3,
min_value=2,
max_value=100,
allow_nan=False,
),
test_with_out=st.just(False),
)
def test_tensorflow_igamma(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-04,
a=xs[0],
x=xs[1],
)
# imag
@handle_frontend_test(
fn_tree="tensorflow.math.imag",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_imag(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
input=x[0],
)
# in_top_k
@handle_frontend_test(
fn_tree="tensorflow.math.in_top_k",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
k=st.integers(min_value=0, max_value=5),
test_with_out=st.just(False),
)
def test_tensorflow_in_top_k(
*, dtype_and_x, frontend, test_flags, backend_fw, fn_tree, on_device, k
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
targets=x[0],
pred=x[1],
k=k,
)
# invert_permutation
@handle_frontend_test(
fn_tree="tensorflow.math.invert_permutation",
dtype_and_perm=_invert_permutation_helper(for_frontend_test=True),
test_with_out=st.just(False),
)
def test_tensorflow_invert_permutation(
*,
dtype_and_perm,
frontend,
test_flags,
backend_fw,
fn_tree,
on_device,
):
input_dtype, perm = dtype_and_perm
helpers.test_frontend_function(
input_dtypes=[input_dtype],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=perm,
)
# is_finite
@handle_frontend_test(
fn_tree="tensorflow.math.is_finite",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_is_finite(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# is_inf
@handle_frontend_test(
fn_tree="tensorflow.math.is_inf",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_is_inf(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# is_nan
@handle_frontend_test(
fn_tree="tensorflow.math.is_nan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_is_nan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# is_non_decreasing
@handle_frontend_test(
fn_tree="tensorflow.math.is_non_decreasing",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_is_non_decreasing(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# is_strictly_increasing
@handle_frontend_test(
fn_tree="tensorflow.math.is_strictly_increasing",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_is_strictly_increasing(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# l2_normalize
@handle_frontend_test(
fn_tree="tensorflow.math.l2_normalize",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=3,
max_num_dims=5,
min_dim_size=1,
max_dim_size=4,
min_axis=-3,
max_axis=2,
),
)
def test_tensorflow_l2_normalize(
*,
dtype_values_axis,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
x=x[0],
axis=axis,
)
# less
@handle_frontend_test(
fn_tree="tensorflow.math.less",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_less(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# less_equal
@handle_frontend_test(
fn_tree="tensorflow.math.less_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_less_equal(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# lgamma
@handle_frontend_test(
fn_tree="tensorflow.math.lgamma",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_lgamma(
*,
dtype_and_x,
on_device,
fn_tree,
backend_fw,
frontend,
test_flags,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-04,
x=xs[0],
)
# log
@handle_frontend_test(
fn_tree="tensorflow.math.log",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_tensorflow_log(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log1p
@handle_frontend_test(
fn_tree="tensorflow.math.log1p",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_log1p(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log_sigmoid
@handle_frontend_test(
fn_tree="tensorflow.math.log_sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=3,
small_abs_safety_factor=3,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_tensorflow_log_sigmoid(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# log_softmax
@handle_frontend_test(
fn_tree="tensorflow.math.log_softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_log_softmax(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logits=x[0],
)
# logical_and
@handle_frontend_test(
fn_tree="tensorflow.math.logical_and",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=(ivy.bool,),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_logical_and(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# logical_not
@handle_frontend_test(
fn_tree="tensorflow.math.logical_not",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=(ivy.bool,),
    ),
test_with_out=st.just(False),
)
def test_tensorflow_logical_not(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# logical_or
@handle_frontend_test(
fn_tree="tensorflow.math.logical_or",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("bool"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_logical_or(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# logical_xor
@handle_frontend_test(
fn_tree="tensorflow.math.logical_xor",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=(ivy.bool,),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_logical_xor(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# maximum
@handle_frontend_test(
fn_tree="tensorflow.math.maximum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_maximum(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# minimum
@handle_frontend_test(
fn_tree="tensorflow.math.minimum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
min_value=-20,
max_value=20,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_minimum(
*,
dtype_and_x,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# mod
@handle_frontend_test(
fn_tree="tensorflow.math.mod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_mod(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
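    # as with floormod, near-zero operands make the result backend-dependent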
assume(not np.any(np.isclose(x[0], 0)))
assume(not np.any(np.isclose(x[1], 0)))
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# multiply
@handle_frontend_test(
fn_tree="tensorflow.math.multiply",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_multiply(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# multiply_no_nan
@handle_frontend_test(
fn_tree="tensorflow.math.multiply_no_nan",
dtype_and_x=helpers.dtype_and_values(
num_arrays=2,
available_dtypes=helpers.get_dtypes("float"),
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_multiply_no_nan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xy = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xy[0],
y=xy[1],
)
# negative
@handle_frontend_test(
fn_tree="tensorflow.math.negative",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.one_of(
helpers.get_dtypes("signed_integer"),
helpers.get_dtypes("float"),
)
),
test_with_out=st.just(False),
)
def test_tensorflow_negative(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# nextafter
@handle_frontend_test(
fn_tree="tensorflow.math.nextafter",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
num_arrays=2,
shared_dtype=True,
min_value=-10,
max_value=10,
min_num_dims=1,
max_num_dims=3,
),
test_with_out=st.just(False),
)
def test_tensorflow_nextafter(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
)
# not_equal
@handle_frontend_test(
fn_tree="tensorflow.math.not_equal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_not_equal(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# polyval
@handle_frontend_test(
fn_tree="tensorflow.math.polyval",
dtype_and_coeffs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
max_num_dims=1,
),
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=1,
min_num_dims=0,
max_num_dims=0,
),
)
def test_tensorflow_polyval(
*,
dtype_and_coeffs,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype_x, x = dtype_and_x
dtype_coeffs, coeffs = dtype_and_coeffs
helpers.test_frontend_function(
input_dtypes=dtype_coeffs + dtype_x,
frontend=frontend,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_tree=fn_tree,
on_device=on_device,
coeffs=coeffs,
x=x,
)
# pow
@handle_frontend_test(
fn_tree="tensorflow.math.pow",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=[
"float16",
"float32",
"float64",
"int32",
"int64",
],
num_arrays=2,
min_value=1,
max_value=7,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_pow(
    dtype_and_x, frontend, test_flags, backend_fw, fn_tree, on_device
):
    input_dtype, x = dtype_and_x
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        frontend=frontend,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        x=x[0],
        y=x[1],
    )
# real
@handle_frontend_test(
fn_tree="tensorflow.math.real",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_real(
*,
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
# reciprocal
@handle_frontend_test(
fn_tree="tensorflow.math.reciprocal",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_reciprocal(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-3,
atol=1e-3,
x=x[0],
)
# reciprocal_no_nan
@handle_frontend_test(
fn_tree="tensorflow.math.reciprocal_no_nan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reciprocal_no_nan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# reduce_all
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_all",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=(ivy.bool,),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_all(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_any
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_any",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=(ivy.bool,),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_any(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
    input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_euclidean_norm
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_euclidean_norm",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
max_num_dims=2,
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_euclidean_norm(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
    input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
rtol=1e-01,
atol=1e-01,
on_device=on_device,
input_tensor=x[0],
)
# reduce_logsumexp
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_logsumexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_logsumexp(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_max
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_max",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_max(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_mean
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_mean",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_mean(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
atol=1e-2,
rtol=1e-2,
on_device=on_device,
input_tensor=x[0],
)
# reduce_min
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_min",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_min(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_prod
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_prod",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_prod(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_std
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_std",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
)
def test_tensorflow_reduce_std(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
)
# reduce_sum
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_sum",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=25,
small_abs_safety_factor=25,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_reduce_sum(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
atol=1e-03,
input_tensor=x[0],
)
# reduce_variance
@handle_frontend_test(
fn_tree="tensorflow.math.reduce_variance",
dtype_and_x=_statistical_dtype_values(
function="var",
),
test_with_out=st.just(False),
keepdims=st.booleans(),
)
def test_tensorflow_reduce_variance(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keepdims,
):
input_dtype, x, axis, ddof = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input_tensor=x[0],
axis=axis,
atol=1e-2,
rtol=1e-2,
keepdims=keepdims,
)
@handle_frontend_test(
fn_tree="tensorflow.math.rint",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_rint(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
backend_to_test=backend_fw,
x=x[0],
)
# round
@handle_frontend_test(
fn_tree="tensorflow.math.round",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_round(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# rsqrt
@handle_frontend_test(
fn_tree="tensorflow.math.rsqrt",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
test_with_out=st.just(False),
)
def test_tensorflow_rsqrt(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=x[0],
)
# scalar_mul
@handle_frontend_test(
fn_tree="tensorflow.math.scalar_mul",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.shared(
helpers.get_dtypes("float", full=False),
key="shared_dtype",
),
min_num_dims=1,
min_dim_size=2,
),
scalar_val=helpers.dtype_and_values(
available_dtypes=st.shared(
helpers.get_dtypes("float", full=False),
key="shared_dtype",
),
shape=(1,),
),
test_with_out=st.just(False),
)
def test_tensorflow_scalar_mul(
*,
dtype_and_x,
scalar_val,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
scalar_dtype, scalar = scalar_val
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
scalar=scalar[0][0],
x=x[0],
)
@handle_frontend_test(
fn_tree="tensorflow.math.segment_sum",
data=helpers.array_values(dtype=helpers.get_dtypes("valid"), shape=(5, 6)),
segment_ids=helpers.array_values(
dtype=helpers.get_dtypes("signed_integer", prune_function=True),
shape=(5,),
min_value=0,
max_value=4,
),
test_with_out=st.just(False),
)
def test_tensorflow_segment_sum(
*,
data,
segment_ids,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=[str(data.dtype), "int32", "int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data,
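        # tf.math.segment_sum requires sorted segment ids, so sort the drawn values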
segment_ids=np.sort(segment_ids),
)
# sigmoid
@handle_frontend_test(
fn_tree="tensorflow.math.sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
num_arrays=1,
min_value=-20,
max_value=20,
),
test_with_out=st.just(False),
)
def test_tensorflow_sigmoid(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
)
# sin
@handle_frontend_test(
fn_tree="tensorflow.math.sin",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_sin(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# sinh
@handle_frontend_test(
fn_tree="tensorflow.math.sinh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
test_with_out=st.just(False),
)
def test_tensorflow_sinh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# softmax
@handle_frontend_test(
fn_tree="tensorflow.math.softmax",
dtype_values_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
allow_inf=False,
),
test_with_out=st.just(False),
)
def test_tensorflow_softmax(
*,
dtype_values_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, axis = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
logits=x[0],
atol=1e-02,
rtol=1e-2,
axis=axis,
)
# softplus
@handle_frontend_test(
fn_tree="tensorflow.math.softplus",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_softplus(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
# softsign
@handle_frontend_test(
fn_tree="tensorflow.math.softsign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_tensorflow_softsign(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
features=x[0],
)
# sqrt
@handle_frontend_test(
fn_tree="tensorflow.math.sqrt",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_sqrt(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# square
@handle_frontend_test(
fn_tree="tensorflow.math.square",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_square(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# squared_difference
@handle_frontend_test(
fn_tree="tensorflow.math.squared_difference",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_squared_difference(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# subtract
@handle_frontend_test(
fn_tree="tensorflow.math.subtract",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_subtract(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
)
# tan
@handle_frontend_test(
fn_tree="tensorflow.math.tan",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_tensorflow_tan(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# tanh
@handle_frontend_test(
fn_tree="tensorflow.math.tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_tensorflow_tanh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# top_k
@handle_frontend_test(
fn_tree="tensorflow.math.top_k",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shared_dtype=True,
),
k=st.integers(min_value=0, max_value=5),
sorted=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_top_k(
    *, dtype_and_x, frontend, test_flags, fn_tree, on_device, k, sorted, backend_fw
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
k=k,
sorted=sorted,
)
# truediv
@handle_frontend_test(
fn_tree="tensorflow.math.truediv",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_tensorflow_truediv(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
y=x[1],
rtol=1e-2,
atol=1e-2,
)
# unsorted_segment_mean
@handle_frontend_test(
fn_tree="tensorflow.math.unsorted_segment_mean",
data=helpers.array_values(dtype=ivy.int32, shape=(5, 6), min_value=1, max_value=9),
segment_ids=helpers.array_values(
dtype="int32", shape=(5,), min_value=0, max_value=4
),
test_with_out=st.just(False),
)
def test_tensorflow_unsorted_segment_mean(
*,
data,
segment_ids,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int32", "int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data,
segment_ids=segment_ids,
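        # num_segments must cover the largest id drawn above (ids are in [0, 4])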
num_segments=np.max(segment_ids) + 1,
)
# unsorted_segment_min
@handle_frontend_test(
fn_tree="tensorflow.math.unsorted_segment_min",
data=helpers.array_values(dtype=ivy.int32, shape=(5, 6), min_value=1, max_value=9),
segment_ids=helpers.array_values(
dtype=ivy.int32, shape=(5,), min_value=0, max_value=4
),
test_with_out=st.just(False),
)
def test_tensorflow_unsorted_segment_min(
*,
data,
segment_ids,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int32", "int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data,
segment_ids=segment_ids,
num_segments=np.max(segment_ids) + 1,
)
# unsorted_segment_sqrt_n
@handle_frontend_test(
fn_tree="tensorflow.math.unsorted_segment_sqrt_n",
data=helpers.array_values(dtype=ivy.int32, shape=(5, 6), min_value=1, max_value=9),
segment_ids=helpers.array_values(
dtype=ivy.int32, shape=(5,), min_value=0, max_value=4
),
test_with_out=st.just(False),
)
def test_tensorflow_unsorted_segment_sqrt_n(
*,
data,
segment_ids,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=[ivy.float32, ivy.int32],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data,
segment_ids=segment_ids,
num_segments=np.max(segment_ids) + 1,
)
# unsorted_segment_sum
@handle_frontend_test(
fn_tree="tensorflow.math.unsorted_segment_sum",
data=helpers.array_values(dtype=ivy.int32, shape=(5, 6), min_value=1, max_value=9),
segment_ids=helpers.array_values(
dtype=ivy.int32, shape=(5,), min_value=0, max_value=4
),
test_with_out=st.just(False),
)
def test_tensorflow_unsorted_segment_sum(
*,
data,
segment_ids,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=["int32", "int64"],
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=data,
segment_ids=segment_ids,
num_segments=np.max(segment_ids) + 1,
)
# xdivy
@handle_frontend_test(
fn_tree="tensorflow.math.xdivy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_xdivy(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# xlog1py
@handle_frontend_test(
fn_tree="tensorflow.math.xlog1py",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_xlog1py(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# xlogy
@handle_frontend_test(
fn_tree="tensorflow.math.xlogy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_xlogy(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
y=xs[1],
)
# zero_fraction
@handle_frontend_test(
fn_tree="tensorflow.math.zero_fraction",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_zero_fraction(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x[0],
)
# zeta
@handle_frontend_test(
fn_tree="tensorflow.math.zeta",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
max_num_dims=1,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_zeta(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
q=x[1],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py",
"repo_id": "ivy",
"token_count": 39446
} | 54 |
# global
from hypothesis import strategies as st, assume
import math
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
from ivy_tests.test_ivy.helpers.testing_helpers import handle_example
# --- Helpers --- #
# --------------- #
@st.composite
def _as_strided_helper(draw):
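    """Draw an array plus ``size``, ``stride`` and ``storage_offset`` arguments
    for ``torch.as_strided``, heuristically constrained so the strided view
    stays within the underlying storage; the test still guards against the
    occasional out-of-bounds draw.
    """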
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
ret_shape=True,
)
)
ndim = len(shape)
numel = x[0].size
offset = draw(st.integers(min_value=0, max_value=numel - 1))
numel = numel - offset
size = draw(
helpers.get_shape(
min_num_dims=ndim,
max_num_dims=ndim,
).filter(lambda s: math.prod(s) <= numel)
)
stride = draw(
helpers.get_shape(
min_num_dims=ndim,
max_num_dims=ndim,
).filter(lambda s: all(numel // s_i >= size[i] for i, s_i in enumerate(s)))
)
return x_dtype, x, size, stride, offset
@st.composite
def _as_tensor_helper(draw):
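    """Draw either an array (with its dtype) or a plain Python scalar/list,
    together with an optional target dtype the input can safely be cast to.
    """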
dtype_and_x = draw(
st.one_of(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
st.floats(),
st.integers(),
st.lists(st.one_of(st.floats(), st.integers()), min_size=1),
)
)
if isinstance(dtype_and_x, tuple):
input_dtype = dtype_and_x[0]
x = dtype_and_x[1][0]
else:
input_dtype = []
x = dtype_and_x
dtype = draw(
st.one_of(
helpers.get_castable_dtype(
draw(helpers.get_dtypes("valid")),
dtype=draw(helpers.get_dtypes("valid", full=False))[0],
x=x,
),
st.none(),
)
)
if isinstance(dtype, tuple):
dtype = dtype[0]
return input_dtype, x, dtype
# Helper functions
@st.composite
def _fill_value(draw):
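    """Draw a fill value compatible with the shared ``dtype`` strategy,
    returned either as a plain Python scalar or as a 0-d numpy array.
    """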
with_array = draw(st.sampled_from([True, False]))
dtype = draw(st.shared(helpers.get_dtypes("numeric", full=False), key="dtype"))[0]
with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend:
if ivy_backend.is_uint_dtype(dtype):
ret = draw(helpers.ints(min_value=0, max_value=5))
elif ivy_backend.is_int_dtype(dtype):
ret = draw(helpers.ints(min_value=-5, max_value=5))
else:
ret = draw(helpers.floats(min_value=-5, max_value=5))
if with_array:
return np.array(ret, dtype=dtype)
else:
return ret
@st.composite
def _get_dtype_buffer_count_offset(draw):
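    """Serialize a random array to raw bytes and draw a byte offset (scaled by
    the dtype's itemsize) and an element count for ``torch.frombuffer``; a
    negative count means "read until the end of the buffer".
    """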
dtype, value = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
)
)
value = np.array(value)
length = value.size
value = value.tobytes()
offset = draw(helpers.ints(min_value=0, max_value=length - 1))
count = draw(helpers.ints(min_value=-(2**30), max_value=length - offset))
if count == 0:
count = -1
offset = offset * np.dtype(dtype[0]).itemsize
return dtype, value, count, offset
@st.composite
def _heaviside_helper(draw):
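    """Draw a float input for ``torch.heaviside`` plus a single-element
    ``values`` array of the same dtype (used where the input is exactly zero).
    """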
input_dtype, data = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
)
)
_, values = draw(
helpers.dtype_and_values(
available_dtypes=input_dtype,
shape=helpers.get_shape(
min_num_dims=1,
max_num_dims=1,
min_dim_size=1,
max_dim_size=1,
),
)
)
return input_dtype, data, values
@st.composite
def _start_stop_step(draw):
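    """Draw ``start``/``stop`` in [0, 50] with a step whose sign matches the
    direction of the range, so ``arange``/``range`` always get valid arguments.
    """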
start = draw(helpers.ints(min_value=0, max_value=50))
stop = draw(helpers.ints(min_value=0, max_value=50))
if start < stop:
step = draw(helpers.ints(min_value=1, max_value=50))
else:
step = draw(helpers.ints(min_value=-50, max_value=-1))
return start, stop, step
# --- Main --- #
# ------------ #
# arange
@handle_frontend_test(
fn_tree="torch.arange",
start_stop_step=_start_stop_step(),
dtype=helpers.get_dtypes("float", full=False),
)
def test_torch_arange(
*,
start_stop_step,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
start, stop, step = start_stop_step
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
end=stop,
step=step,
out=None,
dtype=dtype[0],
device=on_device,
)
# as_strided
@handle_frontend_test(
fn_tree="torch.as_strided",
dtype_x_and_other=_as_strided_helper(),
)
def test_torch_as_strided(
*,
dtype_x_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, size, stride, offset = dtype_x_and_other
try:
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
size=size,
stride=stride,
storage_offset=offset,
)
except Exception as e:
if hasattr(e, "message") and "out of bounds for storage of size" in e.message:
assume(False)
else:
raise e
# as_tensor
@handle_frontend_test(
fn_tree="torch.as_tensor",
dtype_x_dtype=_as_tensor_helper(),
)
def test_torch_as_tensor(
*,
dtype_x_dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, dtype = dtype_x_dtype
    # TODO: fix get_castable_dtype to avoid the exceptions
try:
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=x,
dtype=dtype,
device=on_device,
)
except Exception as e:
if any(
error_string in str(e)
for error_string in ["overflow", "too large to convert to"]
):
assume(False)
else:
raise
# asarray
@handle_frontend_test(
fn_tree="torch.asarray",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
dtype=helpers.get_dtypes("numeric", full=False),
test_with_copy=st.just(True),
)
def test_torch_asarray(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
obj=x[0],
dtype=dtype[0],
device=on_device,
)
# complex
@handle_frontend_test(
fn_tree="torch.complex",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
)
def test_torch_complex(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
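        # the single drawn array doubles as both the real and imaginary parts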
real=input[0],
imag=input[0],
)
# empty
@handle_frontend_test(
fn_tree="torch.empty",
size=helpers.ints(min_value=1, max_value=3),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_torch_empty(
*,
size,
shape,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dims = {}
size = (size,)
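    # shape is always drawn here (allow_none=False), so this dims branch is inert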
if shape is None:
i = 0
for x_ in size:
dims[f"x{i}"] = x_
i += 1
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**dims,
size=shape,
dtype=dtype[0],
test_values=False,
device=on_device,
)
# empty_like
@handle_frontend_test(
fn_tree="torch.empty_like",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_torch_empty_like(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, inputs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
input=inputs[0],
dtype=dtype[0],
device=on_device,
test_values=False,
)
@handle_frontend_test(
fn_tree="torch.empty_strided",
dtype_x_and_other=_as_strided_helper(),
)
def test_torch_empty_strided(
*,
dtype_x_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x_dtype, x, size, stride, offset = dtype_x_and_other
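    # x and offset are drawn by the shared helper, but only size/stride are used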
helpers.test_frontend_function(
input_dtypes=x_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
size=size,
stride=stride,
test_values=False,
)
# from_dlpack
@handle_frontend_test(
fn_tree="torch.from_dlpack",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric")
),
)
def test_torch_from_dlpack(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
ext_tensor=x[0],
backend_to_test=backend_fw,
input_dtypes=input_dtype,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
)
# from_numpy
@handle_frontend_test(
fn_tree="torch.from_numpy",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
)
def test_torch_from_numpy(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
data=input[0],
)
@handle_frontend_test(
fn_tree="torch.frombuffer",
dtype_buffer_count_offset=_get_dtype_buffer_count_offset(),
)
def test_torch_frombuffer(
dtype_buffer_count_offset,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
input_dtype, buffer, count, offset = dtype_buffer_count_offset
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
buffer=buffer,
dtype=input_dtype[0],
count=count,
offset=offset,
)
# full
@handle_frontend_test(
fn_tree="torch.full",
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
fill_value=_fill_value(),
dtype=st.shared(helpers.get_dtypes("numeric", full=False), key="dtype"),
)
def test_torch_full(
*,
shape,
fill_value,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
size=shape,
fill_value=fill_value,
dtype=dtype[0],
device=on_device,
)
# full_like
@handle_frontend_test(
fn_tree="torch.full_like",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=st.shared(
helpers.get_dtypes("numeric", full=False), key="dtype"
)
),
fill_value=_fill_value(),
dtype=st.shared(helpers.get_dtypes("numeric", full=False), key="dtype"),
)
def test_torch_full_like(
*,
dtype_and_x,
fill_value,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, inputs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
on_device=on_device,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
input=inputs[0],
fill_value=fill_value,
dtype=dtype[0],
device=on_device,
test_values=False,
)
# heaviside
@handle_frontend_test(
fn_tree="torch.heaviside",
dtype_and_input=_heaviside_helper(),
)
def test_torch_heaviside(
*,
dtype_and_input,
test_flags,
fn_tree,
backend_fw,
on_device,
frontend,
):
input_dtype, data, values = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
input=data[0],
values=values[0],
on_device=on_device,
)
# linspace
@handle_frontend_test(
fn_tree="torch.linspace",
start=st.floats(min_value=-10, max_value=10),
stop=st.floats(min_value=-10, max_value=10),
num=st.integers(min_value=1, max_value=10),
dtype=helpers.get_dtypes("float", full=False),
)
@handle_example(
test_frontend_example=True,
start=np.array(0),
stop=1,
num=2,
dtype=[None],
fn_tree="ivy.functional.frontends.torch.linspace",
)
def test_torch_linspace(
*,
start,
stop,
num,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[] if isinstance(start, float) else ["int64"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
end=stop,
steps=num,
dtype=dtype[0],
device=on_device,
rtol=1e-01,
)
# logspace
@handle_frontend_test(
fn_tree="torch.logspace",
start=st.floats(min_value=-10, max_value=10),
stop=st.floats(min_value=-10, max_value=10),
num=st.integers(min_value=1, max_value=10),
dtype=helpers.get_dtypes("float", full=False),
)
def test_torch_logspace(
*,
start,
stop,
num,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
end=stop,
steps=num,
dtype=dtype[0],
device=on_device,
rtol=1e-01,
)
# ones
@handle_frontend_test(
fn_tree="torch.ones",
size=helpers.ints(min_value=1, max_value=3),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("numeric", full=False),
)
def test_torch_ones(
*,
shape,
size,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dims = {}
size = (size,)
if shape is None:
i = 0
for x_ in size:
dims[f"x{i}"] = x_
i += 1
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
**dims,
size=shape,
dtype=dtype[0],
device=on_device,
)
# ones_like
@handle_frontend_test(
fn_tree="torch.ones_like",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
dtype=helpers.get_dtypes("numeric", full=False),
)
def test_torch_ones_like(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
dtype=dtype[0],
device=on_device,
)
# polar
@handle_frontend_test(
fn_tree="torch.polar",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
test_with_out=st.just(False),
)
def test_torch_polar(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
abs=input[0],
angle=input[0],
)
# range
@handle_frontend_test(
fn_tree="torch.range",
start_stop_step=_start_stop_step(),
dtype=helpers.get_dtypes("float", full=False),
number_positional_args=st.just(3),
)
def test_torch_range(
*,
start_stop_step,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
start, stop, step = start_stop_step
helpers.test_frontend_function(
input_dtypes=[],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
start=start,
end=stop,
step=step,
dtype=dtype[0],
device=on_device,
)
# tensor
@handle_frontend_test(
fn_tree="torch.tensor",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")),
dtype=helpers.get_dtypes("valid", full=False),
)
def test_torch_tensor(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
data=input[0],
dtype=dtype[0],
device=on_device,
)
# zeros
@handle_frontend_test(
fn_tree="torch.zeros",
size=helpers.ints(min_value=1, max_value=3),
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
),
dtype=helpers.get_dtypes("numeric", full=False),
)
def test_torch_zeros(
*,
size,
shape,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dims = {}
size = (size,)
if shape is None:
i = 0
for x_ in size:
dims[f"x{i}"] = x_
i += 1
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
**dims,
size=shape,
dtype=dtype[0],
device=on_device,
)
# zeros_like
@handle_frontend_test(
fn_tree="torch.zeros_like",
dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")),
dtype=helpers.get_dtypes("numeric", full=False),
)
def test_torch_zeros_like(
*,
dtype_and_x,
dtype,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, input = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
dtype=dtype[0],
device=on_device,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_creation_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_creation_ops.py",
"repo_id": "ivy",
"token_count": 10860
} | 55 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_data_layer_norm(
draw,
*,
available_dtypes,
large_abs_safety_factor=100,
small_abs_safety_factor=100,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
ret_shape=True,
abs_smallest_val=1000,
allow_inf=False,
allow_nan=False,
exclude_min=True,
exclude_max=True,
min_value=-1000,
max_value=1000,
shared_dtype=False,
min_dim_size=1,
max_dim_size=3,
group=False,
):
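    """Draw an input array plus matching ``weight``/``bias`` values for the
    torch normalization frontends; with ``group=True`` a group count is also
    sampled from the divisors of the channel dimension.
    """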
results = draw(
helpers.dtype_values_axis(
available_dtypes=available_dtypes,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
valid_axis=valid_axis,
allow_neg_axes=allow_neg_axes,
max_axes_size=max_axes_size,
force_int_axis=force_int_axis,
ret_shape=ret_shape,
)
)
dtype, values, axis, shape = results
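    # for group norm, the number of groups must evenly divide the channel size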
if group:
channel_size = shape[1]
group_list = [*range(1, max_dim_size)]
group_list = list(filter(lambda x: (channel_size % x == 0), group_list))
group_size = draw(st.sampled_from(group_list))
weight_shape = [shape[1]]
bias_shape = [shape[1]]
else:
weight_shape = shape[axis:]
bias_shape = shape[axis:]
arg_dict = {
"available_dtypes": dtype,
"abs_smallest_val": abs_smallest_val,
"min_value": min_value,
"max_value": max_value,
"large_abs_safety_factor": large_abs_safety_factor,
"small_abs_safety_factor": small_abs_safety_factor,
"allow_inf": allow_inf,
"allow_nan": allow_nan,
"exclude_min": exclude_min,
"exclude_max": exclude_max,
"min_num_dims": min_num_dims,
"max_num_dims": max_num_dims,
"shared_dtype": shared_dtype,
"ret_shape": False,
}
results_weight = draw(helpers.dtype_and_values(shape=weight_shape, **arg_dict))
results_bias = draw(helpers.dtype_and_values(shape=bias_shape, **arg_dict))
results_new_std = draw(helpers.dtype_and_values(shape=shape, **arg_dict))
_, weight_values = results_weight
_, bias_values = results_bias
_, new_std_values = results_new_std
axis = shape[axis:]
if group:
return dtype, values, weight_values, bias_values, group_size
return dtype, values, axis, weight_values, bias_values, new_std_values
@st.composite
def _instance_and_batch_norm_helper(draw, *, min_num_dims=1, min_dim_size=1):
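    """Draw an (N, C, ...) input with per-channel running mean/variance,
    optional weight/bias (which may both be None), and momentum/eps scalars.
    """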
x_dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
min_num_dims=min_num_dims,
max_num_dims=4,
min_dim_size=min_dim_size,
ret_shape=True,
)
)
_, variance = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=(shape[1],),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
)
)
_, mean = draw(
helpers.dtype_and_values(
dtype=x_dtype,
shape=(shape[1],),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
)
)
_, others = draw(
st.one_of(
helpers.dtype_and_values(
dtype=x_dtype * 2,
shape=(shape[1],),
large_abs_safety_factor=24,
small_abs_safety_factor=24,
safety_factor_scale="log",
num_arrays=2,
),
st.just(([None, None], [None, None])),
)
)
momentum = draw(helpers.floats(min_value=0.01, max_value=0.1))
eps = draw(helpers.floats(min_value=1e-5, max_value=0.1))
return x_dtype, x[-1], others[0], others[1], mean[0], variance[0], momentum, eps
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="torch.nn.functional.batch_norm",
data=_instance_and_batch_norm_helper(min_num_dims=2, min_dim_size=2),
training=st.booleans(),
)
def test_torch_batch_norm(
*,
data,
training,
frontend,
test_flags,
fn_tree,
    backend_fw,
    on_device,
):
input_dtype, input, weight, bias, running_mean, running_var, momentum, eps = data
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
atol=1e-1,
rtol=1e-1,
        fn_tree=fn_tree,
        on_device=on_device,
input=input,
running_mean=running_mean,
running_var=running_var,
weight=weight,
bias=bias,
training=training,
momentum=momentum,
eps=eps,
)
# group_norm
@handle_frontend_test(
fn_tree="torch.nn.functional.group_norm",
dtype_x_and_axis=_generate_data_layer_norm(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
max_num_dims=3,
min_dim_size=2,
max_dim_size=4,
group=True,
),
epsilon=st.floats(min_value=0.01, max_value=0.1),
test_with_out=st.just(False),
)
def test_torch_group_norm(
dtype_x_and_axis,
epsilon,
frontend,
test_flags,
fn_tree,
    backend_fw,
    on_device,
):
dtype, x, weight, bias, group_size = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
atol=1e-1,
rtol=1e-1,
input=x[0],
num_groups=group_size,
weight=weight[0],
bias=bias[0],
eps=epsilon,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.instance_norm",
data=_instance_and_batch_norm_helper(min_num_dims=3, min_dim_size=2),
use_input_stats=st.booleans(),
)
def test_torch_instance_norm(
*,
data,
use_input_stats,
frontend,
test_flags,
fn_tree,
    backend_fw,
    on_device,
):
input_dtype, input, weight, bias, running_mean, running_var, momentum, eps = data
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
atol=1e-1,
rtol=1e-1,
input=input,
running_mean=running_mean,
running_var=running_var,
weight=weight,
bias=bias,
use_input_stats=use_input_stats,
momentum=momentum,
eps=eps,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.layer_norm",
dtype_x_and_axis=_generate_data_layer_norm(
available_dtypes=helpers.get_dtypes("float"),
),
epsilon=st.floats(min_value=0.01, max_value=0.1),
)
def test_torch_layer_norm(
*,
dtype_x_and_axis,
epsilon,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, axis, weight, bias, new_std = dtype_x_and_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
input=x[0],
normalized_shape=axis,
weight=weight[0],
bias=bias[0],
eps=epsilon,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_norms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_norms.py",
"repo_id": "ivy",
"token_count": 4081
} | 56 |
# global
import numpy as np
from hypothesis import assume
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# unique_all
@handle_test(
fn_tree="functional.ivy.unique_all",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
min_dim_size=1,
force_int_axis=True,
valid_axis=True,
),
none_axis=st.booleans(),
by_value=st.booleans(),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="numpy",
)
def test_unique_all(
*, dtype_x_axis, none_axis, by_value, test_flags, backend_fw, fn_name, on_device
):
dtype, x, axis = dtype_x_axis
if none_axis:
axis = None
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
axis=axis,
by_value=by_value,
)
# unique_counts
@handle_test(
fn_tree="functional.ivy.unique_counts",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
min_dim_size=2,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_unique_counts(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x, 0.0)))
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
)
# unique_inverse
@handle_test(
fn_tree="functional.ivy.unique_inverse",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=2,
min_dim_size=2,
force_int_axis=True,
valid_axis=True,
),
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_unique_inverse(*, dtype_x_axis, test_flags, backend_fw, fn_name, on_device):
dtype, x, axis = dtype_x_axis
assume(not np.any(np.any(np.isclose(x[0], 0.0), axis=axis)))
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
axis=axis,
x=x[0],
)
# unique_values
@handle_test(
fn_tree="functional.ivy.unique_values",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
test_gradients=st.just(False),
)
def test_unique_values(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
assume(not np.any(np.isclose(x, 0.0)))
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=x[0],
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_set.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_set.py",
"repo_id": "ivy",
"token_count": 1518
} | 57 |
# global
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test, BackendHandler
@handle_test(
fn_tree="functional.ivy.experimental.bernoulli",
dtype_and_probs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=0,
max_value=1,
min_num_dims=0,
),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
ground_truth_backend="torch",
)
def test_bernoulli(
*, dtype_and_probs, seed, test_flags, backend_fw, fn_name, on_device
):
dtype, probs = dtype_and_probs
# torch doesn't support half precision on CPU
assume(
not ("torch" in str(backend_fw) and "float16" in dtype and on_device == "cpu")
)
ret_np_flat_from_target, ret_np_from_gt_flat = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
return_flat_np_arrays=True,
probs=probs[0],
logits=None,
shape=None,
seed=seed,
)
helpers.assert_same_type_and_shape([ret_np_flat_from_target, ret_np_from_gt_flat])
# beta
@handle_test(
fn_tree="functional.ivy.experimental.beta",
dtype_and_alpha_beta=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
min_num_dims=1,
max_num_dims=2,
num_arrays=2,
exclude_min=True,
),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_beta(
*,
dtype_and_alpha_beta,
seed,
backend_fw,
fn_name,
on_device,
test_flags,
):
dtype, alpha_beta = dtype_and_alpha_beta
if "float16" in dtype:
return
ret, ret_gt = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
test_values=False,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
alpha=alpha_beta[0],
beta=alpha_beta[1],
shape=None,
dtype=dtype[0],
seed=seed,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for u, v in zip(ret, ret_gt):
assert ivy_backend.all(u >= 0)
assert ivy_backend.all(u <= 1)
assert ivy_backend.all(v >= 0)
assert ivy_backend.all(v <= 1)
# dirichlet
@handle_test(
fn_tree="functional.ivy.experimental.dirichlet",
dtype_and_alpha=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(
st.integers(min_value=2, max_value=5),
),
min_value=0,
max_value=100,
exclude_min=True,
),
size=st.tuples(
st.integers(min_value=2, max_value=5), st.integers(min_value=2, max_value=5)
),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_dirichlet(
*, dtype_and_alpha, size, seed, test_flags, backend_fw, fn_name, on_device
):
dtype, alpha = dtype_and_alpha
assume("bfloat16" not in dtype)
def call():
return helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
test_values=False,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
alpha=alpha[0],
size=size,
seed=seed,
)
ret, ret_gt = call()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
if seed:
ret1, ret_gt1 = call()
assert ivy_backend.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret, ret_gt):
u, v = ivy_backend.array(u), ivy_backend.array(v)
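            # Dirichlet samples lie on the simplex: rows sum to 1, entries in [0, 1]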
assert ivy_backend.all(
ivy_backend.sum(u, axis=-1) == ivy_backend.sum(v, axis=-1)
)
assert ivy_backend.all(u >= 0)
assert ivy_backend.all(u <= 1)
assert ivy_backend.all(v >= 0)
assert ivy_backend.all(v <= 1)
# gamma
@handle_test(
fn_tree="functional.ivy.experimental.gamma",
dtype_and_alpha_beta=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
min_num_dims=1,
max_num_dims=2,
num_arrays=2,
exclude_min=True,
),
seed=helpers.ints(min_value=0, max_value=100),
test_gradients=st.just(False),
)
def test_gamma(
*, dtype_and_alpha_beta, seed, test_flags, backend_fw, fn_name, on_device
):
dtype, alpha_beta = dtype_and_alpha_beta
if "float16" in dtype:
return
ret, ret_gt = helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
test_values=False,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
alpha=alpha_beta[0],
beta=alpha_beta[1],
shape=None,
dtype=dtype[0],
seed=seed,
)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
with BackendHandler.update_backend(backend_fw) as ivy_backend:
for u, v in zip(ret, ret_gt):
assert ivy_backend.all(u >= 0)
assert ivy_backend.all(v >= 0)
# poisson
# TODO: Enable gradient tests (test_gradients) once random generation
# is unified
@handle_test(
fn_tree="functional.ivy.experimental.poisson",
dtype_and_lam=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=False),
min_value=-2,
max_value=5,
min_num_dims=0,
),
dtype=helpers.get_dtypes("float", full=False),
seed=helpers.ints(min_value=0, max_value=100),
fill_value=helpers.floats(min_value=0, max_value=1),
test_gradients=st.just(False),
)
def test_poisson(
*,
dtype_and_lam,
dtype,
seed,
fill_value,
test_flags,
backend_fw,
fn_name,
on_device,
):
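    # lam is drawn from [-2, 5], so some rates are invalid (negative); the
    # fill_value passed below is presumably what ivy.poisson substitutes for such
    # entries. Only dtype and shape are compared against the ground truth.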
lam_dtype, lam = dtype_and_lam
def call():
return helpers.test_function(
input_dtypes=lam_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
test_values=False,
lam=lam[0],
shape=None,
dtype=dtype[0],
seed=seed,
fill_value=fill_value,
)
ret, ret_gt = call()
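    # With a fixed non-zero seed, a repeated call should reproduce at least some
    # values even though the draws themselves are backend-specific.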
if seed:
ret1, ret_gt1 = call()
with BackendHandler.update_backend(backend_fw) as ivy_backend:
assert ivy_backend.any(ret == ret1)
ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw)
ret_gt = helpers.flatten_and_to_np(
ret=ret_gt, backend=test_flags.ground_truth_backend
)
for u, v in zip(ret, ret_gt):
assert u.dtype == v.dtype
assert u.shape == v.shape
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_random.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_random.py",
"repo_id": "ivy",
"token_count": 3729
} | 58 |
"""Collection of tests for unified neural network layers."""
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_data_layer_norm(
draw,
*,
available_dtypes,
large_abs_safety_factor=20,
small_abs_safety_factor=20,
safety_factor_scale="log",
min_num_dims=1,
max_num_dims=5,
valid_axis=True,
allow_neg_axes=False,
max_axes_size=1,
force_int_axis=True,
ret_shape=True,
abs_smallest_val=0.1,
allow_inf=False,
allow_nan=False,
exclude_min=False,
exclude_max=False,
min_value=-1e20,
max_value=1e20,
shared_dtype=False,
):
results = draw(
helpers.dtype_values_axis(
available_dtypes=available_dtypes,
min_value=min_value,
max_value=max_value,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
abs_smallest_val=abs_smallest_val,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
valid_axis=valid_axis,
allow_neg_axes=allow_neg_axes,
max_axes_size=max_axes_size,
force_int_axis=force_int_axis,
ret_shape=ret_shape,
)
)
dtype, values, axis, shape = results
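    # layer_norm normalizes over the trailing dimensions starting at `axis`, so
    # the scale (weight) and offset (bias) arrays must match shape[axis:].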
weight_shape = shape[axis:]
bias_shape = shape[axis:]
normalized_idxs = list(range(axis, len(shape)))
arg_dict = {
"available_dtypes": dtype,
"abs_smallest_val": abs_smallest_val,
"min_value": min_value,
"max_value": max_value,
"large_abs_safety_factor": large_abs_safety_factor,
"small_abs_safety_factor": small_abs_safety_factor,
"safety_factor_scale": safety_factor_scale,
"allow_inf": allow_inf,
"allow_nan": allow_nan,
"exclude_min": exclude_min,
"exclude_max": exclude_max,
"min_num_dims": min_num_dims,
"max_num_dims": max_num_dims,
"shared_dtype": shared_dtype,
"ret_shape": False,
}
results_weight = draw(helpers.dtype_and_values(shape=weight_shape, **arg_dict))
results_bias = draw(helpers.dtype_and_values(shape=bias_shape, **arg_dict))
_, weight_values = results_weight
_, bias_values = results_bias
return dtype, values, normalized_idxs, weight_values, bias_values
# --- Main --- #
# ------------ #
@handle_test(
fn_tree="functional.ivy.layer_norm",
values_tuple=_generate_data_layer_norm(
available_dtypes=helpers.get_dtypes("float"),
),
new_std=st.floats(min_value=0.01, max_value=0.1),
eps=st.floats(min_value=0.01, max_value=0.1),
)
def test_layer_norm(
*, values_tuple, new_std, eps, test_flags, backend_fw, fn_name, on_device
):
dtype, x, normalized_idxs, scale, offset = values_tuple
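    # Loose rtol/atol: backends differ in reduction order and in how eps
    # stabilizes the variance, so results can diverge noticeably.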
helpers.test_function(
input_dtypes=dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
rtol_=0.5,
atol_=0.5,
xs_grad_idxs=[[0, 0]],
x=x[0],
normalized_idxs=normalized_idxs,
eps=eps,
scale=scale[0],
offset=offset[0],
new_std=new_std,
)
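# A minimal usage sketch of the call exercised above (hedged: argument names
# mirror the keywords this test passes, not a verified signature):
#     x = ivy.random_normal(shape=(2, 3, 4))
#     scale, offset = ivy.ones((3, 4)), ivy.zeros((3, 4))
#     out = ivy.layer_norm(x, normalized_idxs=[1, 2], scale=scale, offset=offset)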
| ivy/ivy_tests/test_ivy/test_functional/test_nn/test_norms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_nn/test_norms.py",
"repo_id": "ivy",
"token_count": 1635
} | 59 |
import pytest
from hypothesis import given
import hypothesis.strategies as st
from ivy import handle_exceptions
from ivy.utils.exceptions import (
IvyError,
IvyNotImplementedException,
IvyBroadcastShapeError,
IvyValueError,
InplaceUpdateException,
IvyException,
IvyIndexError,
IvyAttributeError,
IvyBackendException,
IvyDeviceError,
IvyInvalidBackendException,
IvyDtypePromotionError,
_non_ivy_exceptions_mapping,
)
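# `handle_exceptions` is expected to re-raise Ivy exceptions unchanged, map the
# non-Ivy exceptions listed in `_non_ivy_exceptions_mapping` to their Ivy
# counterparts, and wrap everything else in IvyBackendException; the tests
# below cover each of these paths.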
@handle_exceptions
def func(e):
if e is None:
return
raise e()
@pytest.mark.parametrize(
"e",
[
IvyError,
IvyNotImplementedException,
IvyBroadcastShapeError,
IvyValueError,
InplaceUpdateException,
IvyException,
IvyIndexError,
IvyAttributeError,
IvyBackendException,
IvyDeviceError,
IvyInvalidBackendException,
IvyDtypePromotionError,
],
)
def test_ivy_errors_raising(e):
with pytest.raises(e):
func(e)
def test_no_exception():
func(None)
@pytest.mark.parametrize(
("e", "to_be_raised"),
_non_ivy_exceptions_mapping.items(),
)
def test_non_ivy_errors_mapping(e, to_be_raised):
with pytest.raises(
to_be_raised,
) as raised:
func(e)
assert issubclass(raised.type, to_be_raised)
@given(
e=st.sampled_from(
[
Exception,
ZeroDivisionError,
BufferError,
AssertionError,
ImportError,
KeyError,
LookupError,
]
)
)
def test_non_ivy_errors_raising(e):
with pytest.raises(IvyBackendException):
func(e)
| ivy/ivy_tests/test_ivy/test_misc/test_handle_exceptions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_handle_exceptions.py",
"repo_id": "ivy",
"token_count": 772
} | 60 |
[build-system]
requires = [
"setuptools>=42",
"wheel",
"pip"
]
build-backend = "setuptools.build_meta"
[tool.docformatter]
wrap-summaries = 88
pre-summary-newline = true
[tool.autoflake]
in-place = true
remove-all-unused-imports = true
ignore-init-module-imports = true
remove-duplicate-keys = true
remove-unused-variables = true
quiet = true
ignore-pass-after-docstring = true
exclude = ["__init__.py"]
[tool.ruff]
line-length = 88
target-version = "py38"
[tool.ruff.lint]
select = [
# pyflakes
"F",
# pycodestyle
"E", "W",
# pydocstyle
"D",
"I002", # Missing required import.
"UP008", # Checks for super calls that pass redundant arguments.
"G010", # deprecated-log-warn.
"PLR1722", # Use sys.exit() instead of exit() and quit().
"TRY004", # Prefer TypeError exception for invalid type.
"PT014", # pytest-duplicate-parametrize-test-cases.
"PT006", # Checks for the type of parameter names passed to pytest.mark.parametrize.
"PT007", # Checks for the type of parameter values passed to pytest.mark.parametrize.
"PT018", # Checks for assertions that combine multiple independent conditions.
]
ignore = [
"E203", # Whitespace-before-punctuation.
"E402", # Module-import-not-at-top-of-file.
"E731", # Do not assign a lambda expression, use a def.
"D100", # Missing docstring in public module.
"D101", # Missing docstring in public class.
"D102", # Missing docstring in public method.
"D103", # Missing docstring in public function.
"D104", # Missing docstring in public package.
"D105", # Missing docstring in magic method.
"D106", # Missing docstring in public nested class.
"D107", # Missing docstring in `__init__`.
"D203", # 1 blank line required before class docstring.
"D205", # 1 blank line required between summary line and description.
"D212", # Multi-line docstring summary should start at the first line.
"D213", # Multi-line docstring summary should start at the second line.
"D209", # Multi-line docstring closing quotes should be on a separate line.
"D400", # First line should end with a period.
"D413", # Missing blank line after last section of docstrings.
"D401", # First line of docstring should be in imperative mood.
"D415", # First line should end with a period, question mark, or exclamation point.
"D416", # Section name should end with a colon ("Attributes").
"D417", # Missing argument description in the docstring for argument "X".
]
[tool.ruff.lint.per-file-ignores]
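# Intent: waive docstring (D) rules across frontend/backend implementations,
# except func_wrapper.py modules.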
'ivy/functional/(frontends|backends)/(?!.*/func_wrapper\.py$).*(?!__init__\.py$)' = ["D"]
"**/__init__.py" = ["F401","F403","F405","F811","F821", "E501"]
"ivy/functional/frontends/paddle/**" = ["F401", "F403", "F405"]
| ivy/pyproject.toml/0 | {
"file_path": "ivy/pyproject.toml",
"repo_id": "ivy",
"token_count": 1004
} | 61 |
import sys
from get_all_tests import get_all_tests
N = 40
def main():
run_iter = int(sys.argv[1]) - 1
test_names = get_all_tests()
num_tests = len(test_names)
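    # Split the suite into N equal chunks; the last chunk absorbs the remainder
    # so every test is written out exactly once across the N runs.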
tests_per_run = num_tests // N
start = run_iter * tests_per_run
end = num_tests if run_iter == N - 1 else (run_iter + 1) * tests_per_run
with open("tests_to_run", "w") as f:
for test in test_names[start:end]:
f.write(test + "\n")
if __name__ == "__main__":
main()
| ivy/scripts/setup_tests/filter_tests.py/0 | {
"file_path": "ivy/scripts/setup_tests/filter_tests.py",
"repo_id": "ivy",
"token_count": 211
} | 62 |