file_path | content | repo
---|---|---|
src/operators/nn/functional/logsoftmax.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: NNTrait::logsoftmax docstring
fn logsoftmax<
T, impl TTensor: TensorTrait<T>, impl TDivTensor: Div<Tensor<T>>, impl TDrop: Drop<T>
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor = z.exp();
let sum = exp_tensor
.reduce_sum(
Option::Some(array![axis.try_into().unwrap()].span()),
Option::Some(true),
Option::Some(false)
);
let softmax = exp_tensor / sum;
let logsoftmax = softmax.log();
logsoftmax
}
/// Cf: NNTrait::logsoftmax docstring
fn logsoftmaxWide<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TFixed: FixedTrait<T, TMAG>,
impl WFixed: FixedTrait<W, WMAG>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor: Tensor<W> = exp_upcast(*z);
let sum = exp_tensor
.reduce_sum(
Option::Some(array![axis.try_into().unwrap()].span()),
Option::Some(true),
Option::Some(false)
);
let softmax = div_downcast(@exp_tensor, @sum);
softmax.log()
}
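/// Example (sketch, not part of the original file): calling logsoftmax through the
/// NNTrait implementation shown later in this dump. The FP16x16 import paths and the
/// `FP16x16NN` impl name are inferred from nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn logsoftmax_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     // log(softmax(x)) along axis 1; exp of each row of the result sums to 1.
///     NNTrait::logsoftmax(@x, 1)
/// }
/// ```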
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/relu.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::relu docstring
fn relu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if (*item) < NumberTrait::zero() {
data_result.append(NumberTrait::zero());
} else {
data_result.append(*item);
};
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
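/// Example (sketch, not part of the original file): relu on an i32 tensor through the
/// I32NN implementation shown later in this dump; the input values are assumptions.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_i32::I32NN;
///
/// fn relu_example() -> Tensor<i32> {
///     let x = TensorTrait::new(shape: array![2, 2].span(), data: array![1_i32, -2, 3, -4].span());
///     // Negative entries clamp to zero: [[1, 0], [3, 0]].
///     NNTrait::relu(@x)
/// }
/// ```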
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/sigmoid.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::sigmoid docstring
fn sigmoid<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = NumberTrait::one()
/ (NumberTrait::one() + (*item * NumberTrait::neg_one()).exp());
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
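/// Example (sketch, not part of the original file): sigmoid maps every entry to
/// 1 / (1 + exp(-x)), so all outputs lie in (0, 1). Paths follow nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn sigmoid_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     // sigmoid(0) = 0.5; large positive inputs approach 1, large negative approach 0.
///     NNTrait::sigmoid(@x)
/// }
/// ```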
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/softmax.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: NNTrait::softmax docstring
fn softmax<
T,
impl TTensor: TensorTrait<T>,
impl TTensorDiv: Div<Tensor<T>>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
z: @Tensor<T>, axis: Option<i32>
) -> Tensor<T> {
let axis = match axis {
Option::Some(val) => val,
Option::None => -1
};
let exp_tensor = z.exp();
let sum = exp_tensor
.reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false));
exp_tensor / sum
}
/// Cf: NNTrait::softmax docstring
fn softmaxWide<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TFixed: FixedTrait<T, TMAG>,
impl WFixed: FixedTrait<W, WMAG>,
>(
z: @Tensor<T>, axis: Option<i32>
) -> Tensor<T> {
let axis = match axis {
Option::Some(val) => val,
Option::None => -1
};
let exp_tensor: Tensor<W> = exp_upcast(*z);
let sum = exp_tensor
.reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false));
div_downcast(@exp_tensor, @sum)
}
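/// Example (sketch, not part of the original file): `axis` is optional and defaults
/// to -1 (the last dimension), mirroring the `match` at the top of both functions.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn softmax_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     // Equivalent to NNTrait::softmax(@x, Option::Some(-1)).
///     NNTrait::softmax(@x, Option::None)
/// }
/// ```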
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/softmax_zero.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
/// Cf: NNTrait::softmax_zero docstring
fn softmax_zero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TTensorDiv: Div<Tensor<T>>,
impl TPartialEq: PartialEq<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TAddEq: AddEq<T>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor = exp_zero(*z);
let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true);
exp_tensor / sum_no_zero
}
/// Cf: NNTrait::softmax_zero docstring
fn softmaxWide_zero<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TNumber: NumberTrait<T, TMAG>,
impl WNumber: NumberTrait<W, WMAG>,
impl TPartialEq: PartialEq<T>,
impl WPartialEq: PartialEq<W>,
impl TAddEq: AddEq<T>,
impl WAddEq: AddEq<W>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor: Tensor<W> = exp_upcast_zero(*z);
let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true);
div_downcast(@exp_tensor, @sum_no_zero)
}
/// Helper function that computes the element-wise exponential of a tensor, leaving entries that are exactly zero at zero.
///
/// # Arguments
/// * `z` - The input tensor.
///
/// # Returns
/// * A Tensor<T> representing the element-wise exponential of the input tensor; entries equal to zero in the input remain zero.
fn exp_zero<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl FTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl FCopy: Copy<T>,
impl FDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if *item == NumberTrait::zero() {
result.append(NumberTrait::zero());
} else {
result.append((*item).exp());
}
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, result.span())
}
/// Helper function that computes the element-wise exponential of a tensor with an upcast to a wider type, leaving entries that are exactly zero at zero.
///
/// # Arguments
/// * `z` - The input tensor.
///
/// # Returns
/// * A Tensor<W> representing the element-wise exponential of the input tensor, upcast to W; entries equal to zero in the input remain zero.
fn exp_upcast_zero<
T,
TMAG,
W,
WMAG,
impl TNumber: NumberTrait<T, TMAG>,
impl TTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WNumber: NumberTrait<W, WMAG>,
impl WTensor: TensorTrait<W>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TIntoW: Into<T, W>,
>(
mut self: Tensor<T>
) -> Tensor<W> {
let mut result: Array<W> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => {
if *item == NumberTrait::zero() {
result.append(NumberTrait::zero());
} else {
result.append((TIntoW::into(*item)).exp());
}
},
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
/// Helper function that computes the reduce sum while ensuring the output tensor contains no zero values (any zero sum is replaced by one).
///
/// # Arguments
/// * `z` - The input tensor.
///
/// # Returns
/// * A Tensor<T> representing the reduce sum with no entries equal to zero.
fn reduce_sum_no_zero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TPartialEq: PartialEq<T>,
>(
self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
let mut output_data: Array<T> = array![];
if (*self.shape).len() == 1 {
assert(axis == 0, 'axis out of dimensions');
let current_sum = accumulate_sum::<T>(*self.data, *self.shape, *self.shape, axis);
output_data.append(current_sum);
let mut output_shape: Array<usize> = array![];
output_shape.append(1);
return TensorTrait::new(output_shape.span(), output_data.span());
} else {
assert(axis <= (*self.shape).len(), 'axis out of dimensions');
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let mut current_sum = accumulate_sum::<
T
>(*self.data, *self.shape, output_indices, axis);
if current_sum == NumberTrait::zero() {
current_sum = NumberTrait::one();
}
output_data.append(current_sum);
index += 1;
};
if keepdims {
let output_shape = reduce_output_shape(*self.shape, axis, true);
TensorTrait::<T>::new(output_shape, output_data.span())
} else {
TensorTrait::<T>::new(output_shape, output_data.span())
}
}
}
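/// Example (sketch, not part of the original file): softmax_zero behaves like softmax
/// but leaves zero entries at zero, and reduce_sum_no_zero substitutes 1 for all-zero
/// rows so the final division never divides by zero. Paths follow nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn softmax_zero_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     // A row such as [0, a, b] keeps its leading 0; only exp(a) and exp(b)
///     // contribute to that row's normalizing sum.
///     NNTrait::softmax_zero(@x, 1)
/// }
/// ```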
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/softplus.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
/// Cf: NNTrait::softplus docstring
fn softplus<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TFixed: FixedTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = (FixedTrait::ONE() + (*item).exp()).ln();
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
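/// Example (sketch, not part of the original file): softplus computes ln(1 + exp(x)),
/// a smooth, everywhere-positive approximation of relu. Paths follow nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn softplus_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     // softplus(0) = ln(2), roughly 0.693 in fixed point.
///     NNTrait::softplus(@x)
/// }
/// ```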
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/softsign.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::softsign docstring
fn softsign<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TFixed: FixedTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = *item / (FixedTrait::ONE() + (*item).abs());
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
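/// Example (sketch, not part of the original file): softsign computes x / (1 + |x|),
/// squashing values into (-1, 1) with a gentler tail than tanh. Paths follow
/// nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn softsign_example(x: Tensor<FP16x16>) -> Tensor<FP16x16> {
///     NNTrait::softsign(@x)
/// }
/// ```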
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/space_to_depth.cairo | use core::option::OptionTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
/// Cf: NNTrait::space_to_depth docstring
fn space_to_depth<
T,
impl TTensor: TensorTrait<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
tensor: Tensor<T>, blocksize: usize
) -> Tensor<T> {
assert((tensor.shape).len() == 4, 'Unexpected shape 4.');
let blocksize_i32: i32 = blocksize.try_into().unwrap();
let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap();
let C: i32 = (*(tensor.shape).at(1)).try_into().unwrap();
let H: u32 = (*(tensor.shape).at(2));
let W: u32 = (*(tensor.shape).at(3));
let tmpshape = array![
b,
C,
(H / blocksize).try_into().unwrap(),
blocksize_i32,
(W / blocksize).try_into().unwrap(),
blocksize_i32
];
let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false);
let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span());
let finalshape = array![
b,
C * blocksize_i32 * blocksize_i32,
(H / blocksize).try_into().unwrap(),
(W / blocksize).try_into().unwrap()
];
transposed.reshape(target_shape: finalshape.span(), allowzero: false)
}
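/// Example (sketch, not part of the original file): space_to_depth rearranges spatial
/// blocks into channels, so an input of shape [b, C, H, W] becomes
/// [b, C * blocksize^2, H / blocksize, W / blocksize]. Paths follow nn_u32.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_u32::U32NN;
///
/// fn space_to_depth_example(x: Tensor<u32>) -> Tensor<u32> {
///     // With x of shape [1, 2, 4, 4] and blocksize 2, the result has shape [1, 8, 2, 2].
///     NNTrait::space_to_depth(@x, 2)
/// }
/// ```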
| https://github.com/gizatechxyz/orion |
src/operators/nn/functional/thresholded_relu.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::thresholded_relu docstring
fn thresholded_relu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>, alpha: @T
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if (*item) <= (*alpha) {
data_result.append(NumberTrait::zero());
} else {
data_result.append(*item);
};
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
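/// Example (sketch, not part of the original file): entries less than or equal to
/// `alpha` are zeroed; strictly greater entries pass through unchanged. Paths follow
/// nn_fp16x16.cairo below.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::nn::core::NNTrait;
/// use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
/// use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
///
/// fn thresholded_relu_example(x: Tensor<FP16x16>, alpha: FP16x16) -> Tensor<FP16x16> {
///     NNTrait::thresholded_relu(@x, @alpha)
/// }
/// ```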
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations.cairo | mod nn_i8;
mod nn_i32;
mod nn_u32;
mod nn_fp8x23;
mod nn_fp16x16;
mod nn_fp64x64;
mod nn_fp32x32;
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_fp16x16.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::{
FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd
};
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W
};
use orion::operators::tensor::implementations::tensor_fp16x16wide::{
FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd
};
impl FP16x16NN of NNTrait<FP16x16> {
fn relu(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP16x16>, axis: Option<i32>) -> Tensor<FP16x16> {
functional::softmax::softmaxWide::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
functional::softmax_zero::softmaxWide_zero::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
functional::logsoftmax::logsoftmaxWide::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn softsign(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP16x16>, weights: Tensor<FP16x16>, bias: Tensor<FP16x16>
) -> Tensor<FP16x16> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP16x16>, alpha: @FP16x16) -> Tensor<FP16x16> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP16x16>, alpha: @FP16x16) -> Tensor<FP16x16> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP16x16>, alpha: @FP16x16, beta: @FP16x16) -> Tensor<FP16x16> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP16x16>, blocksize: usize, mode: felt252
) -> Tensor<FP16x16> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP16x16>, blocksize: usize) -> Tensor<FP16x16> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP16x16>,
B: Tensor<FP16x16>,
C: Option<Tensor<FP16x16>>,
alpha: Option<FP16x16>,
beta: Option<FP16x16>,
transA: bool,
transB: bool
) -> Tensor<FP16x16> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP16x16>,
grid: @Tensor<FP16x16>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP16x16> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP16x16>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP16x16>,
W: @Tensor<FP16x16>,
B: Option<@Tensor<FP16x16>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP16x16>,
W: @Tensor<FP16x16>,
B: Option<Span<FP16x16>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_fp32x32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x32Impl};
use orion::operators::tensor::implementations::tensor_fp32x32::{
FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd
};
impl FP32x32NN of NNTrait<FP32x32> {
fn relu(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP32x32>, axis: Option<i32>) -> Tensor<FP32x32> {
functional::softmax::softmax(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
functional::softmax_zero::softmax_zero(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
functional::logsoftmax::logsoftmax(tensor, axis)
}
fn softsign(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP32x32>, weights: Tensor<FP32x32>, bias: Tensor<FP32x32>
) -> Tensor<FP32x32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP32x32>, alpha: @FP32x32) -> Tensor<FP32x32> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP32x32>, alpha: @FP32x32) -> Tensor<FP32x32> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP32x32>, alpha: @FP32x32, beta: @FP32x32) -> Tensor<FP32x32> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP32x32>, blocksize: usize, mode: felt252
) -> Tensor<FP32x32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP32x32>, blocksize: usize) -> Tensor<FP32x32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP32x32>,
B: Tensor<FP32x32>,
C: Option<Tensor<FP32x32>>,
alpha: Option<FP32x32>,
beta: Option<FP32x32>,
transA: bool,
transB: bool
) -> Tensor<FP32x32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP32x32>,
grid: @Tensor<FP32x32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP32x32> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP32x32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP32x32>,
W: @Tensor<FP32x32>,
B: Option<@Tensor<FP32x32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP32x32>,
W: @Tensor<FP32x32>,
B: Option<Span<FP32x32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_fp64x64.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl};
use orion::operators::tensor::implementations::tensor_fp64x64::{
FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd
};
impl FP64x64NN of NNTrait<FP64x64> {
fn relu(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP64x64>, axis: Option<i32>) -> Tensor<FP64x64> {
functional::softmax::softmax(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
functional::softmax_zero::softmax_zero(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
functional::logsoftmax::logsoftmax(tensor, axis)
}
fn softsign(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP64x64>, weights: Tensor<FP64x64>, bias: Tensor<FP64x64>
) -> Tensor<FP64x64> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP64x64>, alpha: @FP64x64) -> Tensor<FP64x64> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP64x64>, alpha: @FP64x64) -> Tensor<FP64x64> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP64x64>, alpha: @FP64x64, beta: @FP64x64) -> Tensor<FP64x64> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP64x64>, blocksize: usize, mode: felt252
) -> Tensor<FP64x64> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP64x64>, blocksize: usize) -> Tensor<FP64x64> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP64x64>,
B: Tensor<FP64x64>,
C: Option<Tensor<FP64x64>>,
alpha: Option<FP64x64>,
beta: Option<FP64x64>,
transA: bool,
transB: bool
) -> Tensor<FP64x64> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP64x64>,
grid: @Tensor<FP64x64>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP64x64> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP64x64>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP64x64>,
W: @Tensor<FP64x64>,
B: Option<@Tensor<FP64x64>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP64x64>,
W: @Tensor<FP64x64>,
B: Option<Span<FP64x64>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_fp8x23.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::{
FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd
};
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W
};
use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor};
impl FP8x23NN of NNTrait<FP8x23> {
fn relu(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP8x23>, axis: Option<i32>) -> Tensor<FP8x23> {
functional::softmax::softmaxWide::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
functional::softmax_zero::softmaxWide_zero::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
functional::logsoftmax::logsoftmaxWide::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn softsign(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP8x23>, weights: Tensor<FP8x23>, bias: Tensor<FP8x23>
) -> Tensor<FP8x23> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP8x23>, alpha: @FP8x23) -> Tensor<FP8x23> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP8x23>, alpha: @FP8x23) -> Tensor<FP8x23> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP8x23>, alpha: @FP8x23, beta: @FP8x23) -> Tensor<FP8x23> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(tensor: @Tensor<FP8x23>, blocksize: usize, mode: felt252) -> Tensor<FP8x23> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP8x23>, blocksize: usize) -> Tensor<FP8x23> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP8x23>,
B: Tensor<FP8x23>,
C: Option<Tensor<FP8x23>>,
alpha: Option<FP8x23>,
beta: Option<FP8x23>,
transA: bool,
transB: bool
) -> Tensor<FP8x23> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP8x23>,
grid: @Tensor<FP8x23>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP8x23> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP8x23>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP8x23>,
W: @Tensor<FP8x23>,
B: Option<@Tensor<FP8x23>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP8x23>,
W: @Tensor<FP8x23>,
B: Option<Span<FP8x23>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_i32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd};
impl I32NN of NNTrait<i32> {
fn relu(tensor: @Tensor<i32>) -> Tensor<i32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<i32>, axis: Option<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<i32>, axis: usize) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<i32>, axis: usize) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<i32>, weights: Tensor<i32>, bias: Tensor<i32>) -> Tensor<i32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<i32>, alpha: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<i32>, alpha: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<i32>, alpha: @i32, beta: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<i32>, blocksize: usize, mode: felt252) -> Tensor<i32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<i32>, blocksize: usize) -> Tensor<i32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<i32>,
B: Tensor<i32>,
C: Option<Tensor<i32>>,
alpha: Option<i32>,
beta: Option<i32>,
transA: bool,
transB: bool
) -> Tensor<i32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<i32>,
grid: @Tensor<i32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<i32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<i32>,
W: @Tensor<i32>,
B: Option<@Tensor<i32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<i32>,
W: @Tensor<i32>,
B: Option<Span<i32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_i8.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd};
impl I8NN of NNTrait<i8> {
fn relu(tensor: @Tensor<i8>) -> Tensor<i8> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<i8>, axis: Option<i32>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<i8>, axis: usize) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<i8>, axis: usize) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<i8>, weights: Tensor<i8>, bias: Tensor<i8>) -> Tensor<i8> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<i8>, alpha: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<i8>, alpha: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<i8>, alpha: @i8, beta: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<i8>, blocksize: usize, mode: felt252) -> Tensor<i8> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<i8>, blocksize: usize) -> Tensor<i8> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<i8>,
B: Tensor<i8>,
C: Option<Tensor<i8>>,
alpha: Option<i8>,
beta: Option<i8>,
transA: bool,
transB: bool
) -> Tensor<i8> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<i8>,
grid: @Tensor<i8>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<i8>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<i8>,
W: @Tensor<i8>,
B: Option<@Tensor<i8>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<i8>,
W: @Tensor<i8>,
B: Option<Span<i8>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/nn/implementations/nn_u32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd};
impl U32NN of NNTrait<u32> {
fn relu(tensor: @Tensor<u32>) -> Tensor<u32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<u32>, axis: Option<i32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<u32>, axis: usize) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<u32>, axis: usize) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<u32>, weights: Tensor<u32>, bias: Tensor<u32>) -> Tensor<u32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<u32>, alpha: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<u32>, alpha: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<u32>, alpha: @u32, beta: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<u32>, blocksize: usize, mode: felt252) -> Tensor<u32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<u32>, blocksize: usize) -> Tensor<u32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<u32>,
B: Tensor<u32>,
C: Option<Tensor<u32>>,
alpha: Option<u32>,
beta: Option<u32>,
transA: bool,
transB: bool
) -> Tensor<u32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<u32>,
grid: @Tensor<u32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<u32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<u32>,
W: @Tensor<u32>,
B: Option<@Tensor<u32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<u32>,
W: @Tensor<u32>,
B: Option<Span<u32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence.cairo | mod core;
mod implementations;
mod functional;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::implementations::sequence_fp8x23::FP8x23Sequence;
use orion::operators::sequence::implementations::sequence_fp8x23wide::FP8x23WSequence;
use orion::operators::sequence::implementations::sequence_fp16x16::FP16x16Sequence;
use orion::operators::sequence::implementations::sequence_fp16x16wide::FP16x16WSequence;
use orion::operators::sequence::implementations::sequence_i8::I8Sequence;
use orion::operators::sequence::implementations::sequence_i32::I32Sequence;
use orion::operators::sequence::implementations::sequence_u32::U32Sequence;
use orion::operators::sequence::implementations::sequence_bool::BoolSequence;
| https://github.com/gizatechxyz/orion |
src/operators/sequence/core.cairo | use orion::operators::tensor::core::Tensor;
/// Trait
///
/// sequence_construct - Constructs a tensor sequence containing the input tensors.
/// sequence_empty - Returns an empty tensor sequence.
/// sequence_length - Returns the length of the input sequence.
/// sequence_insert - Inserts a tensor into a sequence at the specified position.
/// sequence_at - Outputs the tensor at the specified position in the input sequence.
/// sequence_erase - Outputs the tensor sequence with the erased tensor at the specified position.
/// concat_from_sequence - Concatenates a sequence of tensors into a single tensor.
trait SequenceTrait<T> {
/// ## sequence.sequence_construct
///
/// ```rust
/// fn sequence_construct(tensors: Array<Tensor<T>>) -> Array<Tensor<T>>;
/// ```
///
/// Constructs a tensor sequence containing the input tensors.
///
/// ## Args
///
/// * `tensors`(`Array<Tensor<T>>`) - The array of input tensors.
///
/// ## Panics
///
/// * Panics if input tensor array is empty.
///
/// ## Returns
///
/// A tensor sequence `Array<Tensor<T>>` containing the input tensors.
///
/// ## Examples
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// use orion::operators::sequence::SequenceTrait;
///
/// fn sequence_construct_example() -> Array<Tensor<usize>> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4, 5, 6, 7].span());
/// let result = SequenceTrait::sequence_construct(tensors: array![tensor1, tensor2]);
/// return result;
/// }
/// >>> [[0, 1, 2, 3], [4, 5, 6, 7]]
/// ```
///
fn sequence_construct(tensors: Array<Tensor<T>>) -> Array<Tensor<T>>;
/// ## sequence.sequence_empty
///
/// ```rust
/// fn sequence_empty() -> Array<Tensor<T>>;
/// ```
///
/// Returns an empty tensor sequence.
///
/// ## Args
///
/// ## Returns
///
/// An empty `Array<Tensor<T>>` instance.
///
/// ## Examples
///
/// Let's create a new empty sequence.
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{
/// TensorTrait, // we import the trait
/// Tensor, // we import the type
/// U32Tensor // we import the implementation.
/// };
/// use orion::operators::sequence::SequenceTrait;
///
/// fn sequence_empty_example() -> Array<Tensor<u32>> {
/// let sequence = SequenceTrait::sequence_empty();
///
/// return sequence;
/// }
///
/// >>> []
/// ```
///
fn sequence_empty() -> Array<Tensor<T>>;
/// ## sequence.sequence_length
///
/// ```rust
/// fn sequence_length(self: Array<Tensor<T>>) -> Tensor<u32>;
/// ```
///
/// Returns the length of the input sequence.
///
/// ## Args
///
/// * `self`(`Array<Tensor<T>>`) - The input sequence.
///
/// ## Returns
///
/// The length of the sequence as scalar, i.e. a tensor of shape [].
///
/// ## Examples
///
/// Let's compute the length of a sequence holding a single 1x2 tensor.
///
/// ```rust
/// let mut sequence = ArrayTrait::new();
///
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(1);
/// shape.append(2);
///
/// let mut data = ArrayTrait::new();
/// data.append(3);
/// data.append(1);
///
/// sequence.append(TensorTrait::new(shape.span(), data.span()));
///
/// sequence.sequence_length()
/// >>> [1]
/// ```
///
fn sequence_length(self: Array<Tensor<T>>) -> Tensor<u32>;
/// ## sequence.sequence_insert
///
/// ```rust
/// fn sequence_insert(self: Array<Tensor<T>>, tensor: @Tensor<T>, position: Option<Tensor<i32>>) -> Array<Tensor<T>>;
/// ```
///
/// Returns a tensor sequence that inserts 'tensor' into 'self' at 'position'.
///
/// ## Args
///
/// * `self`(`Array<Tensor<T>>`) - input sequence.
/// * `tensor` (`@Tensor<T>`) - the tensor to insert.
/// * `position` (`Option<Tensor<i32>>`) - the index for insertion (default: -1).
///
/// ## Returns
///
/// Tensor sequence containing 'tensor' inserted into 'self' at 'position'.
///
/// ## Examples
///
/// Let's insert the tensor [2] into the sequence [[1], [3]] at position 1.
///
/// ```rust
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor, U32Tensor};
/// use orion::operators::sequence::SequenceTrait;
///
/// fn sequence_insert_example() -> Array<Tensor<u32>> {
/// // Prepare sequence
/// let mut sequence = ArrayTrait::new();
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(1);
///
/// let mut data = ArrayTrait::new();
/// data.append(1);
/// sequence.append(TensorTrait::new(shape.span(), data.span()));
/// let mut data = ArrayTrait::new();
/// data.append(3);
///
/// sequence.append(TensorTrait::new(shape.span(), data.span()));
///
/// // Prepare input tensor
/// let mut data = ArrayTrait::new();
/// data.append(2);
/// let tensor = TensorTrait::new(shape.span(), data.span());
///
/// // Prepare position
/// let mut shape = ArrayTrait::<usize>::new();
/// let mut data = ArrayTrait::<i32>::new();
/// data.append(1);
/// let position = TensorTrait::<i32>::new(shape.span(), data.span());
///
/// let sequence = sequence.sequence_insert(@tensor, Option::Some(position));
///
/// return sequence;
/// }
///
/// >>> [[1], [2], [3]]
/// ```
///
fn sequence_insert(
self: Array<Tensor<T>>, tensor: @Tensor<T>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>>;
/// ## sequence.sequence_at
///
/// ```rust
/// fn sequence_at(sequence: Array<Tensor<T>>, position: Tensor<i32>) -> Tensor<T>;
/// ```
///
/// Outputs the tensor at the specified position in the input sequence.
///
/// ## Args
///
/// * `tensors`(`Array<Tensor<T>>`) - The tensor sequence.
/// * `position`(`Tensor<i32>`) - The position tensor.
///
/// ## Panics
///
/// * Panics if position is not a scalar
/// * Panics if position is out of bounds [-n, n - 1]
///
/// ## Returns
///
/// The tensor `Tensor<T>` from the sequence at the specified position.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor};
/// use orion::operators::sequence::SequenceTrait;
///
/// fn sequence_at_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4, 5, 6, 7].span());
///
/// let mut sequence = ArrayTrait::new();
/// sequence.append(tensor1);
/// sequence.append(tensor2);
///
/// let position = TensorTrait::new(shape: array![].span(), data: array![1_i32].span());
///
/// let result = SequenceTrait::sequence_at(sequence, position);
/// return result;
/// }
/// >>> [4, 5, 6, 7]
/// ```
///
fn sequence_at(sequence: Array<Tensor<T>>, position: Tensor<i32>) -> Tensor<T>;
/// ## sequence.sequence_erase
///
/// ```rust
/// fn sequence_erase(sequence: Array<Tensor<T>>, position: Option<Tensor<i32>>) -> Array<Tensor<T>>;
/// ```
///
/// Outputs the tensor sequence with the erased tensor at the specified position.
///
/// ## Args
///
/// * `tensors`(`Array<Tensor<T>>`) - The tensor sequence.
/// * `position`(`Option<Tensor<i32>>`) - The optional position tensor (by default erases the last tensor).
///
/// ## Panics
///
/// * Panics if position is not a scalar
/// * Panics if position is out of bounds [-n, n - 1]
///
/// ## Returns
///
/// The tensor sequence `Array<Tensor<T>>` with the erased tensor at the specified position.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor};
/// use orion::operators::sequence::SequenceTrait;
///
/// fn sequence_erase_example() -> Array<Tensor<u32>> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4, 5, 6, 7].span());
/// let tensor3 = TensorTrait::new(shape: array![2, 2].span(), data: array![8, 9, 10, 11].span());
///
/// let mut sequence = ArrayTrait::new();
/// sequence.append(tensor1);
/// sequence.append(tensor2);
/// sequence.append(tensor3);
///
/// let position = TensorTrait::new(shape: array![].span(), data: array![1_i32].span());
///
/// let result = SequenceTrait::sequence_erase(sequence, Option::Some(position));
/// return result;
/// }
/// >>> [[0, 1, 2, 3], [8, 9, 10, 11]]
/// ```
///
fn sequence_erase(
sequence: Array<Tensor<T>>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>>;
/// ## sequence.concat_from_sequence
///
/// ```rust
/// fn concat_from_sequence(sequence: Array<Tensor<T>>, axis: i32, new_axis: Option<usize>) -> Tensor<T>;
/// ```
///
/// Concatenate a sequence of tensors into a single tensor.
///
/// ## Args
///
/// * `sequence`(`Array<Tensor<T>>`) - The input sequence.
/// * `axis`(`i32`) - Axis to concat on.
/// * `new_axis`(`Option<usize>`) - Optionally added new axis.
///
/// ## Panics
///
/// * Panics if `new_axis` is not 0 or 1 (when a value is provided).
/// * Panics if `axis` is not in the accepted range.
/// * Panics if sequence length is not greater than 1.
///
/// ## Returns
///
/// A new `Tensor<T>` concatenated tensor from the input tensor sequence.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// use orion::operators::sequence::SequenceTrait;
///
/// fn concat_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
///
/// let mut sequence = ArrayTrait::new();
/// sequence.append(tensor1);
/// sequence.append(tensor2);
///
/// let result = SequenceTrait::concat_from_sequence(sequence: sequence, axis: 0, new_axis: Option::Some(0));
/// return result;
/// }
/// >>> [[0. 1.]
/// [2. 3.],
/// [0. 1.]
/// [2. 3.]]
///
/// result.shape
/// >>> (4, 2)
///
/// With `axis: 1` instead:
///
/// let result = SequenceTrait::concat_from_sequence(sequence: sequence, axis: 1, new_axis: Option::Some(0));
/// >>> [[0. 1., 0., 1.]
/// [2. 3., 2., 3.]]
///
/// result.shape
/// >>> (2, 4)
/// ```
///
fn concat_from_sequence(
sequence: Array<Tensor<T>>, axis: i32, new_axis: Option<usize>
) -> Tensor<T>;
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional.cairo | mod sequence_construct;
mod sequence_empty;
mod sequence_at;
mod sequence_erase;
mod sequence_insert;
mod sequence_length;
mod concat_from_sequence;
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/concat_from_sequence.cairo | use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::concat::concat;
use orion::numbers::{NumberTrait, I32IntoU32};
fn concat_from_sequence<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, new_axis: Option<usize>
) -> Tensor<T> {
let new_axis: usize = match new_axis {
Option::Some(val) => {
assert(val == 0 || val == 1, 'new_axis must be 0 or 1');
val
},
Option::None => 0
};
let first_tensor = *sequence.at(0);
let r = first_tensor.shape.len();
if new_axis == 0 {
concat_without_new_axis(sequence, axis, r)
} else {
concat_with_new_axis(sequence, axis, r)
}
}
fn concat_without_new_axis<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, r: usize
) -> Tensor<T> {
let axis_is_negative: bool = axis < 0;
let mut axis_value: u32 = axis.into();
// assert axis is in range [-r, r - 1]
assert(
(!axis_is_negative && axis_value <= r - 1) || (axis_is_negative && axis_value <= r),
'Out of bounds for dimension'
);
if axis_is_negative {
axis_value = r - axis_value
}
concat(sequence.span(), axis_value)
}
fn concat_with_new_axis<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, r: usize
) -> Tensor<T> {
let axis_is_negative: bool = axis < 0;
let mut axis_value: u32 = axis.into();
// assert axis is in range [-r - 1, r]
assert(
(!axis_is_negative && axis_value <= r) || (axis_is_negative && axis_value <= r + 1),
'Out of bounds for dimension'
);
if axis_is_negative {
if axis_value > r {
axis_value = 0
} else {
axis_value = r - axis_value
}
}
let mut input_sequence_copy = sequence;
let mut reshaped_sequence: Array<Tensor<T>> = array![];
loop {
match input_sequence_copy.pop_front() {
Option::Some(input_sequence_value) => {
let mut reshaped_tensor = add_new_dimension(input_sequence_value, axis_value);
reshaped_sequence.append(reshaped_tensor);
},
Option::None => { break; }
};
};
concat(reshaped_sequence.span(), axis_value)
}
fn add_new_dimension<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
mut tensor: Tensor<T>, axis: usize
) -> Tensor<T> {
let mut tensor_shape = tensor.shape;
let mut new_tensor_shape: Array<usize> = array![];
let mut tensor_shape_counter: usize = 0;
loop {
match tensor_shape.pop_front() {
Option::Some(tensor_shape_value) => {
if tensor_shape_counter == axis {
new_tensor_shape.append(1);
}
new_tensor_shape.append(*tensor_shape_value);
tensor_shape_counter += 1;
},
Option::None => { break; }
};
};
if axis >= tensor.shape.len() {
new_tensor_shape.append(1);
}
TensorTrait::<T>::new(new_tensor_shape.span(), tensor.data)
}
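/// Example (sketch, not part of the original file): with new_axis = 1 each tensor
/// first gains a length-1 dimension at `axis` (see add_new_dimension above), so two
/// [2, 2] tensors concatenated on axis 0 yield shape [2, 2, 2] rather than [4, 2].
/// The import paths come from sequence.cairo earlier in this dump.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
/// use orion::operators::sequence::SequenceTrait;
/// use orion::operators::sequence::implementations::sequence_u32::U32Sequence;
///
/// fn concat_stacked() -> Tensor<u32> {
///     let t1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0_u32, 1, 2, 3].span());
///     let t2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4_u32, 5, 6, 7].span());
///     SequenceTrait::concat_from_sequence(array![t1, t2], 0, Option::Some(1))
/// }
/// ```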
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_at.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32};
/// Cf: SequenceTrait::sequence_at docstring
fn sequence_at<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
sequence: Array<Tensor<T>>, position: Tensor<i32>
) -> Tensor<T> {
assert(
    position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar'
);
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= sequence.len() - 1)
|| (is_negative && position_value <= sequence.len()),
'Position out of bounds'
);
if !is_negative {
*sequence.at(position_value)
} else {
let normalized_position_value = sequence.len() - position_value;
*sequence.at(normalized_position_value)
}
}
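/// Example (sketch, not part of the original file): negative positions index from the
/// end, so -1 selects the last tensor (the code normalizes it to len - |position|).
/// The import paths come from sequence.cairo earlier in this dump.
///
/// ```rust
/// use orion::operators::tensor::core::{Tensor, TensorTrait};
/// use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
/// use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
/// use orion::operators::sequence::SequenceTrait;
/// use orion::operators::sequence::implementations::sequence_u32::U32Sequence;
///
/// fn last_tensor(sequence: Array<Tensor<u32>>) -> Tensor<u32> {
///     // Scalar position tensor: shape [] with a single i32 value.
///     let position = TensorTrait::new(shape: array![].span(), data: array![-1_i32].span());
///     SequenceTrait::sequence_at(sequence, position)
/// }
/// ```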
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_construct.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_construct docstring
fn sequence_construct<T, impl TDrop: Drop<T>>(tensors: Array<Tensor<T>>) -> Array<Tensor<T>> {
assert(tensors.len() >= 1, 'Input tensors must be >= 1');
tensors
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_empty.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_empty docstring
fn sequence_empty<T, impl TTensorTrait: TensorTrait<T>, impl TDrop: Drop<T>>() -> Array<Tensor<T>> {
let mut sequence = array![];
let mut shape: Array<usize> = array![];
shape.append(0);
let mut data: Array<T> = array![];
let tensor = TensorTrait::new(shape.span(), data.span());
sequence.append(tensor);
sequence
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_erase.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::I32Tensor;
use orion::numbers::{NumberTrait, I32IntoU32};
/// Cf: SequenceTrait::sequence_erase docstring
fn sequence_erase<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
sequence: Array<Tensor<T>>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>> {
let position: Tensor<i32> = match position {
Option::Some(p) => p,
Option::None => {
let mut shape: Array<usize> = array![];
let mut data: Array<i32> = array![];
data.append(-1_i32);
TensorTrait::<i32>::new(shape.span(), data.span())
}
};
assert(position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar');
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let mut position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= sequence.len() - 1)
|| (is_negative && position_value <= sequence.len()),
'Position out of bounds'
);
if is_negative {
position_value = sequence.len() - position_value;
}
let mut input_sequence_copy = sequence;
let mut output_sequence: Array<Tensor<T>> = array![];
let mut tensor_counter: usize = 0;
loop {
match input_sequence_copy.pop_front() {
Option::Some(input_sequence_value) => {
if tensor_counter == position_value {
tensor_counter += 1;
continue;
}
output_sequence.append(input_sequence_value);
tensor_counter += 1;
},
Option::None => { break; }
};
};
output_sequence
}
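// A usage sketch (illustrative, not part of the upstream module), assuming the
// `U32Tensor` impl is imported. Passing `Option::None` exercises the default
// position of -1 built above, erasing the last tensor of the sequence.
fn sequence_erase_example() -> Array<Tensor<u32>> {
    let t0 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![10].span());
    let t1 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![20].span());
    // Only `t0` remains in the returned sequence.
    sequence_erase(array![t0, t1], Option::None)
}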
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_insert.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::I32Tensor;
use orion::numbers::{NumberTrait, I32IntoU32};
/// Cf: SequenceTrait::sequence_insert docstring
fn sequence_insert<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: Array<Tensor<T>>, tensor: @Tensor<T>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>> {
let position: Tensor<i32> = match position {
Option::Some(p) => p,
Option::None => {
let mut shape: Array<usize> = array![];
let mut data: Array<i32> = array![];
data.append(-1_i32);
TensorTrait::<i32>::new(shape.span(), data.span())
},
};
assert(position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar');
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let mut position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= self.len() - 1)
|| (is_negative && position_value <= self.len()),
'Position out of bounds'
);
if is_negative {
position_value = self.len() - position_value;
}
let mut new_sequence: Array<Tensor<T>> = array![];
let mut inserted = false;
let mut self_copy = self;
loop {
match self_copy.pop_front() {
Option::Some(t) => {
                if position_value == 0 && !inserted {
new_sequence.append(*tensor);
inserted = true;
}
new_sequence.append(t);
if !inserted {
position_value -= 1;
}
},
Option::None => { break; },
};
};
new_sequence
}
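// A usage sketch (illustrative, not part of the upstream module), assuming the
// `U32Tensor` impl is imported. A position of 1 inserts the new tensor before
// the element currently at index 1, yielding the order t0, t_new, t1.
fn sequence_insert_example() -> Array<Tensor<u32>> {
    let t0 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![10].span());
    let t1 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![30].span());
    let t_new = TensorTrait::<u32>::new(shape: array![1].span(), data: array![20].span());
    let position = TensorTrait::<i32>::new(shape: array![].span(), data: array![1].span());
    sequence_insert(array![t0, t1], @t_new, Option::Some(position))
}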
| https://github.com/gizatechxyz/orion |
src/operators/sequence/functional/sequence_length.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_length docstring
fn sequence_length<T, impl TDrop: Drop<T>>(self: Array<Tensor<T>>) -> Tensor<u32> {
let mut shape: Array<usize> = array![];
let mut result: Array<usize> = array![];
result.append(self.len());
Tensor::<u32> { shape: shape.span(), data: result.span(), }
}
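// A usage sketch (illustrative, not part of the upstream module), assuming the
// `U32Tensor` impl is imported. The length comes back as a rank-0 (scalar)
// u32 tensor whose single data element is 2 here.
fn sequence_length_example() -> Tensor<u32> {
    let t0 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![1].span());
    let t1 = TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span());
    sequence_length(array![t0, t1])
}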
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations.cairo | mod sequence_bool;
mod sequence_i8;
mod sequence_i32;
mod sequence_u32;
mod sequence_fp8x23;
mod sequence_fp8x23wide;
mod sequence_fp16x16;
mod sequence_fp16x16wide;
mod sequence_fp32x32;
mod sequence_fp64x64;
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_bool.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl BoolSequence of SequenceTrait<bool> {
fn sequence_construct(tensors: Array<Tensor<bool>>) -> Array<Tensor<bool>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<bool>> {
functional::sequence_empty::sequence_empty::<bool>()
}
fn sequence_length(self: Array<Tensor<bool>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<bool>>, position: Tensor<i32>) -> Tensor<bool> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<bool>>, position: Option<Tensor<i32>>
) -> Array<Tensor<bool>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<bool>>, tensor: @Tensor<bool>, position: Option<Tensor<i32>>
) -> Array<Tensor<bool>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<bool>>, axis: i32, new_axis: Option<usize>
) -> Tensor<bool> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
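// A sketch (illustrative, not part of the upstream module) of reaching the
// methods above through `SequenceTrait`: build a one-tensor sequence and ask
// for its length, which is returned as a scalar tensor holding 1. All
// referenced impls are already imported in this file.
fn bool_sequence_example() -> Tensor<u32> {
    let t = TensorTrait::<bool>::new(shape: array![2].span(), data: array![true, false].span());
    let seq = BoolSequence::sequence_construct(array![t]);
    seq.sequence_length()
}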
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp16x16.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP16x16Sequence of SequenceTrait<FP16x16> {
fn sequence_construct(tensors: Array<Tensor<FP16x16>>) -> Array<Tensor<FP16x16>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP16x16>> {
functional::sequence_empty::sequence_empty::<FP16x16>()
}
fn sequence_length(self: Array<Tensor<FP16x16>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP16x16>>, position: Tensor<i32>) -> Tensor<FP16x16> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP16x16>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP16x16>>, tensor: @Tensor<FP16x16>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP16x16>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP16x16> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp16x16wide.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W;
use orion::operators::tensor::implementations::tensor_fp16x16wide::FP16x16WTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP16x16WSequence of SequenceTrait<FP16x16W> {
fn sequence_construct(tensors: Array<Tensor<FP16x16W>>) -> Array<Tensor<FP16x16W>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP16x16W>> {
functional::sequence_empty::sequence_empty::<FP16x16W>()
}
fn sequence_length(self: Array<Tensor<FP16x16W>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP16x16W>>, position: Tensor<i32>) -> Tensor<FP16x16W> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP16x16W>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16W>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP16x16W>>, tensor: @Tensor<FP16x16W>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16W>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP16x16W>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP16x16W> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp32x32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp32x32::core::FP32x32;
use orion::operators::tensor::implementations::tensor_fp32x32::FP32x32Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP32x32Sequence of SequenceTrait<FP32x32> {
fn sequence_construct(tensors: Array<Tensor<FP32x32>>) -> Array<Tensor<FP32x32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP32x32>> {
functional::sequence_empty::sequence_empty::<FP32x32>()
}
fn sequence_length(self: Array<Tensor<FP32x32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP32x32>>, position: Tensor<i32>) -> Tensor<FP32x32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP32x32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP32x32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP32x32>>, tensor: @Tensor<FP32x32>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP32x32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP32x32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP32x32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp64x64.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp64x64::core::FP64x64;
use orion::operators::tensor::implementations::tensor_fp64x64::FP64x64Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP64x64Sequence of SequenceTrait<FP64x64> {
fn sequence_construct(tensors: Array<Tensor<FP64x64>>) -> Array<Tensor<FP64x64>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP64x64>> {
functional::sequence_empty::sequence_empty::<FP64x64>()
}
fn sequence_length(self: Array<Tensor<FP64x64>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP64x64>>, position: Tensor<i32>) -> Tensor<FP64x64> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP64x64>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP64x64>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP64x64>>, tensor: @Tensor<FP64x64>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP64x64>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP64x64>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP64x64> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp8x23.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP8x23Sequence of SequenceTrait<FP8x23> {
fn sequence_construct(tensors: Array<Tensor<FP8x23>>) -> Array<Tensor<FP8x23>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP8x23>> {
functional::sequence_empty::sequence_empty::<FP8x23>()
}
fn sequence_length(self: Array<Tensor<FP8x23>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP8x23>>, position: Tensor<i32>) -> Tensor<FP8x23> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP8x23>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP8x23>>, tensor: @Tensor<FP8x23>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP8x23>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP8x23> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_fp8x23wide.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::FP8x23W;
use orion::operators::tensor::implementations::tensor_fp8x23wide::FP8x23WTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP8x23WSequence of SequenceTrait<FP8x23W> {
fn sequence_construct(tensors: Array<Tensor<FP8x23W>>) -> Array<Tensor<FP8x23W>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP8x23W>> {
functional::sequence_empty::sequence_empty::<FP8x23W>()
}
fn sequence_length(self: Array<Tensor<FP8x23W>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP8x23W>>, position: Tensor<i32>) -> Tensor<FP8x23W> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP8x23W>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23W>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP8x23W>>, tensor: @Tensor<FP8x23W>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23W>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP8x23W>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP8x23W> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_i32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl I32Sequence of SequenceTrait<i32> {
fn sequence_construct(tensors: Array<Tensor<i32>>) -> Array<Tensor<i32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<i32>> {
functional::sequence_empty::sequence_empty::<i32>()
}
fn sequence_length(self: Array<Tensor<i32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<i32>>, position: Tensor<i32>) -> Tensor<i32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<i32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<i32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<i32>>, tensor: @Tensor<i32>, position: Option<Tensor<i32>>
) -> Array<Tensor<i32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<i32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<i32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_i8.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_i8::I8Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl I8Sequence of SequenceTrait<i8> {
fn sequence_construct(tensors: Array<Tensor<i8>>) -> Array<Tensor<i8>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<i8>> {
functional::sequence_empty::sequence_empty::<i8>()
}
fn sequence_length(self: Array<Tensor<i8>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<i8>>, position: Tensor<i32>) -> Tensor<i8> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<i8>>, position: Option<Tensor<i32>>
) -> Array<Tensor<i8>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<i8>>, tensor: @Tensor<i8>, position: Option<Tensor<i32>>
) -> Array<Tensor<i8>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<i8>>, axis: i32, new_axis: Option<usize>
) -> Tensor<i8> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/sequence/implementations/sequence_u32.cairo | use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl U32Sequence of SequenceTrait<u32> {
fn sequence_construct(tensors: Array<Tensor<u32>>) -> Array<Tensor<u32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<u32>> {
functional::sequence_empty::sequence_empty::<u32>()
}
fn sequence_length(self: Array<Tensor<u32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<u32>>, position: Tensor<i32>) -> Tensor<u32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<u32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<u32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<u32>>, tensor: @Tensor<u32>, position: Option<Tensor<i32>>
) -> Array<Tensor<u32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<u32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<u32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor.cairo | mod core;
mod helpers;
mod math;
mod linalg;
mod quantization;
mod implementations;
mod manipulation;
mod ml;
use orion::operators::tensor::core::{Tensor, TensorSerde, TensorTrait};
use orion::operators::tensor::implementations::tensor_fp8x23::{
FP8x23Tensor, FP8x23TensorAdd, FP8x23TensorSub, FP8x23TensorMul, FP8x23TensorDiv,
FP8x23TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_fp32x32::{
FP32x32Tensor, FP32x32TensorAdd, FP32x32TensorSub, FP32x32TensorMul, FP32x32TensorDiv,
FP32x32TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_fp16x16::{
FP16x16Tensor, FP16x16TensorAdd, FP16x16TensorSub, FP16x16TensorMul, FP16x16TensorDiv,
FP16x16TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_i8::{
I8Tensor, I8TensorAdd, I8TensorSub, I8TensorMul, I8TensorDiv, I8TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_i32::{
I32Tensor, I32TensorAdd, I32TensorSub, I32TensorMul, I32TensorDiv, I32TensorPartialEq,
TensorI8IntoTensorI32
};
use orion::operators::tensor::implementations::tensor_u32::{
U32Tensor, U32TensorAdd, U32TensorSub, U32TensorMul, U32TensorDiv, U32TensorPartialEq
};
use orion::operators::tensor::implementations::tensor_bool::{BoolTensor, BoolTensorPartialEq};
use orion::operators::tensor::implementations::tensor_complex64::{
Complex64Tensor, Complex64TensorAdd, Complex64TensorSub, Complex64TensorMul, Complex64TensorDiv,
Complex64TensorPartialEq,
};
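// Note (illustrative): downstream code typically pulls the trait, the type and
// one concrete impl through these re-exports, e.g.
//
//     use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
//
// The `*TensorAdd`, `*TensorSub`, `*TensorMul` and `*TensorDiv` impls listed
// above are what enable the `+`, `-`, `*` and `/` operators on tensors of the
// matching element type.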
| https://github.com/gizatechxyz/orion |
src/operators/tensor/core.cairo | use alexandria_data_structures::array_ext::ArrayTraitExt;
use core::array::{ArrayTrait, SpanTrait};
use core::serde::Serde;
use core::option::OptionTrait;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::operators::tensor::helpers::{len_from_shape, check_shape};
use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32};
#[derive(Copy, Drop)]
struct Tensor<T> {
shape: Span<usize>,
data: Span<T>,
}
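// A sketch (illustrative, not part of the upstream module): `data` holds the
// elements flat, in row-major order, so a 2x3 tensor stores 6 values.
// Constructing through `TensorTrait::new` (declared below) validates the
// shape/data-length invariant via the `check_shape` helper imported above,
// which a raw struct literal does not.
fn row_major_example() -> Tensor<u32> {
    // assumes `use orion::operators::tensor::implementations::tensor_u32::U32Tensor;`
    TensorTrait::<u32>::new(shape: array![2, 3].span(), data: array![0, 1, 2, 3, 4, 5].span())
}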
// Implement Serde for Tensor<T>.
impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<T>> {
fn serialize(self: @Tensor<T>, ref output: Array<felt252>) {
self.shape.serialize(ref output);
self.data.serialize(ref output);
}
fn deserialize(ref serialized: Span<felt252>) -> Option<Tensor<T>> {
let shape: Span<usize> = Serde::<Span<usize>>::deserialize(ref serialized)?;
let data: Span<T> = Serde::<Span<T>>::deserialize(ref serialized)?;
Option::Some(Tensor { shape, data })
}
}
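// A sketch (illustrative, not part of the upstream module) of a full serde
// round trip; it assumes a concrete impl such as `U32Tensor` is in scope for
// `TensorTrait::new`. `shape` is written first, then `data`, each as a
// length-prefixed span of felts.
fn tensor_serde_roundtrip() -> Option<Tensor<u32>> {
    let tensor = TensorTrait::<u32>::new(shape: array![2].span(), data: array![7, 9].span());
    let mut output: Array<felt252> = array![];
    tensor.serialize(ref output);
    let mut serialized = output.span();
    Serde::<Tensor<u32>>::deserialize(ref serialized)
}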
/// Trait
///
/// new - Returns a new tensor with the given shape and data.
/// reshape - Returns a new tensor with the specified target shape and the same data as the input tensor.
/// flatten - Flattens the input tensor into a 2D tensor.
/// constant_of_shape - Generate a tensor with given value and shape.
/// transpose - Returns a new tensor with the axes rearranged according to the given permutation.
/// at - Retrieves the value at the specified indices of a Tensor.
/// ravel_index - Converts a multi-dimensional index to a one-dimensional index.
/// unravel_index - Converts a one-dimensional index to a multi-dimensional index.
/// equal - Check if two tensors are equal element-wise.
/// greater - Check if each element of the first tensor is greater than the corresponding element of the second tensor.
/// greater_equal - Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor.
/// less - Check if each element of the first tensor is less than the corresponding element of the second tensor.
/// less_equal - Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor.
/// or - Computes the logical OR of two tensors element-wise.
/// xor - Computes the logical XOR of two tensors element-wise.
/// stride - Computes the stride of each dimension in the tensor.
/// onehot - Produces one-hot tensor based on input.
/// max_in_tensor - Returns the maximum value in the tensor.
/// min_in_tensor - Returns the minimum value in the tensor.
/// min - Returns the minimum value in the tensor.
/// max - Returns the maximum value in the tensor.
/// reduce_sum - Reduces a tensor by summing its elements along a specified axis.
/// reduce_prod - Reduces a tensor by multiplying its elements along the specified axis.
/// argmax - Returns the index of the maximum value along the specified axis.
/// argmin - Returns the index of the minimum value along the specified axis.
/// cumsum - Performs cumulative sum of the input elements along the given axis.
/// matmul - Performs matrix product of two tensors.
/// exp - Computes the exponential of all elements of the input tensor.
/// log - Computes the natural log of all elements of the input tensor.
/// abs - Computes the absolute value of all elements in the input tensor.
/// neg - Computes the negation of all elements in the input tensor.
/// ceil - Rounds up the value of each element in the input tensor.
/// sqrt - Computes the square root of all elements of the input tensor.
/// sin - Computes the sine of all elements of the input tensor.
/// cos - Computes the cosine of all elements of the input tensor.
/// atan - Computes the arctangent (inverse of tangent) of all elements of the input tensor.
/// asin - Computes the arcsine (inverse of sine) of all elements of the input tensor.
/// acos - Computes the arccosine (inverse of cosine) of all elements of the input tensor.
/// sinh - Computes the hyperbolic sine of all elements of the input tensor.
/// tanh - Computes the hyperbolic tangent of all elements of the input tensor.
/// cosh - Computes the hyperbolic cosine of all elements of the input tensor.
/// asinh - Computes the inverse hyperbolic sine of all elements of the input tensor.
/// acosh - Computes the inverse hyperbolic cosine of all elements of the input tensor.
/// slice - Produces a slice of the input tensor along multiple axes.
/// concat - Concatenate a list of tensors into a single tensor.
/// quantize_linear - Quantizes a Tensor to i8 using linear quantization.
/// dequantize_linear - Dequantizes an i8 Tensor using linear dequantization.
/// qlinear_add - Performs the sum of two quantized i8 Tensors.
/// qlinear_mul - Performs the element-wise multiplication of quantized Tensors.
/// qlinear_matmul - Performs the product of two quantized i8 Tensors.
/// qlinear_concat - Concatenate a list of tensors after dequantizing them with their respective scales and zero_points and returns the quantized result.
/// qlinear_leakyrelu - Applies the Leaky Relu operator to a quantized Tensor
/// gather - Gather entries of the axis dimension of data.
/// nonzero - Produces indices of the elements that are non-zero (in row-major order - by dimension).
/// squeeze - Removes dimensions of size 1 from the shape of a tensor.
/// unsqueeze - Inserts single-dimensional entries to the shape of an input tensor.
/// sign - Calculates the sign of the given input tensor element-wise.
/// clip - Clip operator limits the given input within an interval.
/// and - Computes the logical AND of two tensors element-wise.
/// identity - Return a Tensor with the same shape and contents as input.
/// where - Return elements chosen from x or y depending on condition.
/// bitwise_and - Computes the bitwise AND of two tensors element-wise.
/// bitwise_xor - Computes the bitwise XOR of two tensors element-wise.
/// bitwise_or - Computes the bitwise OR of two tensors element-wise.
/// resize - Resizes the input tensor.
/// round - Computes the round value of all elements in the input tensor.
/// reduce_l1 - Computes the L1 norm of the input tensor's elements along the provided axes.
/// trilu - Returns the upper or lower triangular part of a tensor or a batch of 2D matrices.
/// scatter - Produces a copy of the input data, updating its values at the index positions specified by indices to the corresponding values in updates.
/// reduce_sum_square - Computes the sum square of the input tensor's elements along the provided axes.
/// reduce_l2 - Computes the L2 norm of the input tensor's elements along the provided axes.
/// gather_elements - GatherElements is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor.
/// reduce_min - Computes the min of the input tensor's elements along the provided axes.
/// shrink - Shrinks the input tensor element-wise to the output tensor with the same datatype and shape based on a defined formula.
/// reduce_mean - Computes the mean of the input tensor's elements along the provided axes.
/// pow - Pow takes input data (Tensor) and exponent Tensor, and produces one output data (Tensor) where the function f(x) = x^exponent, is applied to the data tensor elementwise.
/// binarizer - Maps the values of a tensor element-wise to 0 or 1 based on the comparison against a threshold value.
/// array_feature_extractor - Selects elements of the input tensor based on the indices passed applied to the last tensor axis.
/// is_nan - Returns which elements of the input are NaN.
/// is_inf - Maps infinity to true and other values to false.
/// not - Computes the logical negation of all elements in the input tensor.
/// gather_nd - Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.
/// reduce_log_sum - Computes the log sum of the input tensor's elements along the provided axes.
/// erf - Computes the error function of the given input tensor element-wise.
/// reduce_log_sum_exp - Computes the log sum of the exponentials of the input tensor's elements along the provided axes.
/// layer_normalization - computes the layer normalization of the input tensor.
/// split - Split a tensor into a list of tensors, along the specified ‘axis’.
/// random_uniform_like - RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor.
/// split_to_sequence - Split a tensor into a sequence of tensors, along the specified ‘axis’.
/// range - Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive).
/// hann_window - Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.
/// hamming_window - Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.
/// blackman_window - Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106.
/// reverse_sequence - Reverse batch of sequences having different lengths specified by sequence_lens.
/// optional - Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element.
/// dynamic_quantize_linear - Computes the Scale, Zero Point and FP32->8Bit conversion of FP32 Input data.
/// scatter_nd - The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data
/// label_encoder - Maps each element in the input tensor to another value.
trait TensorTrait<T> {
/// # tensor.new
///
/// ```rust
/// fn new(shape: Span<usize>, data: Span<T>) -> Tensor<T>;
/// ```
///
/// Returns a new tensor with the given shape and data.
///
/// ## Args
///
/// * `shape`(`Span<usize>`) - A span representing the shape of the tensor.
/// * `data` (`Span<T>`) - A span containing the array of elements.
///
/// ## Panics
///
/// * Panics if the shape and data length are incompatible.
///
/// ## Returns
///
/// A new `Tensor<T>` instance.
///
/// ## Examples
///
/// Let's create new u32 Tensors.
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{
/// TensorTrait, // we import the trait
/// Tensor, // we import the type
/// U32Tensor // we import the implementation.
/// };
///
/// // 1D TENSOR
/// fn tensor_1D() -> Tensor<u32> {
/// let tensor = TensorTrait::new(shape: array![3].span(), data: array![0, 1, 2].span());
///
/// return tensor;
/// }
///
/// // 2D TENSOR
/// fn tensor_2D() -> Tensor<u32> {
/// let tensor = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
///
/// return tensor;
/// }
///
/// // 3D TENSOR
/// fn tensor_3D() -> Tensor<u32> {
/// let tensor = TensorTrait::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor;
/// }
/// ```
///
fn new(shape: Span<usize>, data: Span<T>) -> Tensor<T>;
/// # tensor.at
///
/// ```rust
/// fn at(self: @Tensor<T>, indices: Span<usize>) -> T;
/// ```
///
/// Retrieves the value at the specified indices of a Tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Span<usize>`) - The indices to access element of the Tensor.
///
/// ## Panics
///
/// * Panics if the number of indices provided don't match the number of dimensions in the tensor.
///
/// ## Returns
///
/// The `T` value at the specified indices.
///
    /// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
///
/// fn at_example() -> u32 {
/// let tensor = TensorTrait::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `at` function as follows.
/// return tensor.at(indices: array![0, 1, 1].span());
/// }
/// >>> 3
/// ```
///
fn at(self: @Tensor<T>, indices: Span<usize>) -> T;
/// # tensor.min_in_tensor
///
/// ```rust
/// fn min_in_tensor(self: @Tensor<T>) -> T;
/// ```
///
/// Returns the minimum value in the tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// The minimum `T` value in the tensor.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn min_in_tensor_example() -> u32 {
/// let tensor = TensorTrait::new(
/// shape: array![2, 2, 2].span(),
/// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `min_in_tensor` function as follows.
/// return tensor.min_in_tensor();
/// }
/// >>> 0
/// ```
///
fn min_in_tensor(self: @Tensor<T>) -> T;
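    // Note (illustrative): the four undocumented methods below are the
    // element-wise arithmetic entry points; they correspond to the
    // `*TensorAdd`/`*TensorSub`/`*TensorMul`/`*TensorDiv` operator impls
    // re-exported from tensor.cairo, and both operands must have equal or
    // broadcastable shapes.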
fn add(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn sub(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn mul(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn div(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
/// # tensor.min
///
/// ```rust
/// fn min(tensors: Span<Tensor<T>>) -> Tensor<T>;
/// ```
///
/// Returns the element-wise minimum values from a list of input tensors
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
    /// * `tensors`(`Span<Tensor<T>>`) - Array of the input tensors
///
/// ## Returns
///
/// A new `Tensor<T>` containing the element-wise minimum values
///
/// ## Panics
///
/// * Panics if tensor array is empty
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Examples
///
/// Case 1: Process tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn min_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 3, 1, 2].span(),);
/// let result = TensorTrait::min(tensors: array![tensor1, tensor2].span());
/// return result;
/// }
/// >>> [0, 1, 1, 2]
///
/// result.shape
/// >>> (2, 2)
/// ```
///
/// Case 2: Process tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn min_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![1, 2].span(), data: array![1, 4].span(),);
/// let result = TensorTrait::min(tensors: array![tensor1, tensor2].span());
/// return result;
/// }
    /// >>> [0, 1, 1, 3]
///
/// result.shape
/// >>> (2, 2)
/// ```
///
fn min(tensors: Span<Tensor<T>>) -> Tensor<T>;
/// # tensor.max_in_tensor
///
/// ```rust
/// fn max_in_tensor(self: @Tensor<T>) -> T;
/// ```
///
/// Returns the maximum value in the tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// The maximum `T` value in the tensor.
///
    /// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn max_in_tensor_example() -> u32 {
/// let tensor = TensorTrait::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `max_in_tensor` function as follows.
/// return tensor.max_in_tensor();
/// }
/// >>> 7
/// ```
///
fn max_in_tensor(self: @Tensor<T>) -> T;
/// # tensor.max
///
/// ```rust
/// fn max(tensors: Span<Tensor<T>>) -> Tensor<T>;
/// ```
///
/// Returns the element-wise maximum values from a list of input tensors
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
    /// * `tensors`(`Span<Tensor<T>>`) - Array of the input tensors
///
/// ## Returns
///
/// A new `Tensor<T>` containing the element-wise maximum values
///
/// ## Panics
///
/// * Panics if tensor array is empty
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Examples
///
/// Case 1: Process tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn max_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 3, 1, 2].span(),);
/// let result = TensorTrait::max(tensors: array![tensor1, tensor2].span());
/// return result;
/// }
/// >>> [0, 3, 2, 3]
///
/// result.shape
/// >>> (2, 2)
/// ```
///
/// Case 2: Process tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn max_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![1, 2].span(), data: array![1, 4].span(),);
/// let result = TensorTrait::max(tensors: array![tensor1, tensor2].span());
/// return result;
/// }
/// >>> [1, 4, 2, 4]
///
/// result.shape
/// >>> (2, 2)
/// ```
///
fn max(tensors: Span<Tensor<T>>) -> Tensor<T>;
/// # tensor.stride
///
/// ```rust
/// fn stride(self: @Tensor<T>) -> Span<usize>;
/// ```
///
/// Computes the stride of each dimension in the tensor.
///
    /// ## Args
    ///
    /// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// A span of usize representing the stride for each dimension of the tensor.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn stride_example() -> Span<usize> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `stride` function as follows.
/// return tensor.stride();
/// }
/// >>> [4,2,1]
/// ```
///
fn stride(self: @Tensor<T>) -> Span<usize>;
/// # tensor.ravel_index
///
/// ```rust
/// fn ravel_index(self: @Tensor<T>, indices: Span<usize>) -> usize;
/// ```
///
/// Converts a multi-dimensional index to a one-dimensional index.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Span<usize>`) - The indices of the Tensor to ravel.
///
/// ## Panics
///
/// * Panics if the indices are out of bounds of the Tensor shape.
///
/// ## Returns
///
/// The index corresponding to the given indices.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn ravel_index_example() -> usize {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `ravel_index` function as follows.
    ///     return tensor.ravel_index(indices: array![1, 1, 0].span());
    /// }
    /// >>> 6
    /// // This means that the value at indices [1,1,0]
    /// // of the multidimensional array can be found at index 6 of Tensor.data.
/// ```
///
fn ravel_index(self: @Tensor<T>, indices: Span<usize>) -> usize;
/// # tensor.unravel_index
///
/// ```rust
/// fn unravel_index(self: @Tensor<T>, index: usize) -> Span<usize>;
/// ```
///
/// Converts a one-dimensional index to a multi-dimensional index.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
    /// * `index`(`usize`) - The index to unravel.
///
/// ## Panics
///
/// * Panics if the index is out of bounds of the Tensor shape.
///
/// ## Returns
///
/// The unraveled indices corresponding to the given index.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn unravel_index_example() -> Span<usize> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `unravel_index` function as follows.
/// return tensor.unravel_index(3);
/// }
/// >>> [0,1,1]
/// // This means that the value of index 3 of Tensor.data
/// // can be found at indices [0,1,1] in multidimensional representation.
/// ```
///
fn unravel_index(self: @Tensor<T>, index: usize) -> Span<usize>;
/// # tensor.reshape
///
/// ```rust
/// fn reshape(self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool) -> Tensor<T>;
/// ```
///
/// Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second
/// input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
/// At most one dimension of the new shape can be -1. In this case, the value is inferred from
/// the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case
/// the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set,
/// and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from input tensor)
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `target_shape`(Span<i32>) - A span containing the target shape of the tensor.
/// * `allowzero`(`bool`) - Indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy.
///
/// ## Panics
///
/// * Panics if the target shape is incompatible with the input tensor's data.
///
/// ## Returns
///
/// A new `Tensor<T>` with the specified target shape and the same data.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reshape_tensor_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reshape` function as follows.
    ///     return tensor.reshape(target_shape: array![2, 4].span(), allowzero: false);
/// }
/// >>> [[0,1,2,3], [4,5,6,7]]
/// ```
///
fn reshape(self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool) -> Tensor<T>;
/// # tensor.transpose
///
/// ```rust
/// fn transpose(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
/// ```
///
/// Returns a new tensor with the axes rearranged according to the given permutation.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axes`(`Span<usize>`) - The usize elements representing the axes to be transposed.
///
/// ## Panics
///
/// * Panics if the length of the axes array is not equal to the rank of the input tensor.
///
/// ## Returns
///
/// A `Tensor<T>` instance with the axes reordered according to the given permutation.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn transpose_tensor_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `transpose` function as follows.
/// return tensor.transpose(axes: array![1, 2, 0].span());
/// }
/// >>> [[[0,4],[1,5]],[[2,6],[3,7]]]
/// ```
///
fn transpose(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
    /// # tensor.reduce_sum
///
/// ```rust
/// fn reduce_sum(self: @Tensor<T>, axes: Option<Span<i32>>, keepdims: Option<bool>, noop_with_empty_axes: Option<bool>) -> Tensor<T>;
/// ```
///
/// Reduces a tensor by summing its elements along a specified axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axes`(`Option<Span<i32>>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true.
/// * `keepdims`(`Option<bool>`) - Keep the reduced dimension or not, default 1 means keep reduced dimension.
    /// * `noop_with_empty_axes`(`Option<bool>`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When 'axes' is empty and this attribute is set to true, the input tensor is not reduced, and the output tensor is equivalent to the input tensor.
///
/// ## Returns
///
/// Reduced output tensor.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_sum_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reduce_sum` function as follows.
    ///     return tensor
    ///         .reduce_sum(
    ///             axes: Option::Some(array![0].span()),
    ///             keepdims: Option::Some(false),
    ///             noop_with_empty_axes: Option::None
    ///         );
/// }
/// >>> [[4,6],[8,10]]
/// ```
///
fn reduce_sum(
self: @Tensor<T>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
/// # tensor.argmax
///
/// ```rust
/// fn argmax(self: @Tensor<T>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>) -> Tensor<i32>;
/// ```
///
/// Returns the index of the maximum value along the specified axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`i32`) - The axis along which to compute the argmax.
/// * `keepdims`(`Option<bool>`) - If true, retains reduced dimensions with length 1. Defaults to true.
/// * `select_last_index`(`Option<bool>`) - If true, the index of the last occurrence of the maximum value is returned. Defaults to false.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
    /// A new `Tensor<i32>` instance containing the indices of the maximum values along the specified axis.
///
/// ## Examples
///
/// Case 1: argmax with default parameters
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
    /// fn argmax_example() -> Tensor<i32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmax` function as follows.
/// return tensor.argmax(axis: 2, keepdims: Option::None(()), select_last_index: Option::None(()));
/// }
    /// >>> [[[1],[1]],[[0],[0]]]
/// ```
/// Case 2: argmax with keepdims set to false
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
    /// fn argmax_example() -> Tensor<i32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmax` function as follows.
/// return tensor
/// .argmax(axis: 2, keepdims: Option::Some(false), select_last_index: Option::None(()));
/// }
/// >>> [[1,1],[0,0]]
/// ```
///
/// Case 3: argmax with select_last_index set to true
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
    /// fn argmax_example() -> Tensor<i32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmax` function as follows.
/// return tensor
/// .argmax(axis: 2, keepdims: Option::None(()), select_last_index: Option::Some(true));
/// }
    /// >>> [[[1],[1]],[[1],[1]]]
/// ```
///
fn argmax(
self: @Tensor<T>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32>;
/// # tensor.argmin
///
/// ```rust
/// fn argmin(self: @Tensor<T>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>) -> Tensor<usize>;
/// ```
///
/// Returns the index of the minimum value along the specified axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The axis along which to compute the argmin.
/// * `keepdims`(`Option<bool>`) - If true, retains reduced dimensions with length 1. Defaults to true.
/// * `select_last_index`(`Option<bool>`) - If true, the index of the last occurrence of the minimum value is returned. Defaults to false.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
    /// A new `Tensor<usize>` instance containing the indices of the minimum values along the specified axis.
///
/// ## Examples
///
/// Case 1: argmin with default parameters
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn argmin_example() -> Tensor<usize> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmin` function as follows.
/// return tensor.argmin(axis: 2, keepdims: Option::None(()), select_last_index: Option::None(()));
/// }
    /// >>> [[[0],[0]],[[0],[0]]]
///
/// ```
/// Case 2: argmin with keepdims set to false
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn argmin_example() -> Tensor<usize> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmin` function as follows.
/// return tensor
/// .argmin(axis: 2, keepdims: Option::Some(false), select_last_index: Option::None(()));
/// }
/// >>> [[0,0],[0,0]]
/// ```
///
/// Case 3: argmin with select_last_index set to true
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn argmin_example() -> Tensor<usize> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 4, 5, 5].span(),
/// );
///
/// // We can call `argmin` function as follows.
/// return tensor
/// .argmin(axis: 2, keepdims: Option::None(()), select_last_index: Option::Some(true));
/// }
    /// >>> [[[0],[0]],[[1],[1]]]
/// ```
///
fn argmin(
self: @Tensor<T>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize>;
/// # tensor.matmul
///
/// ```rust
/// fn matmul(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Performs matrix product of two tensors.
/// The behavior depends on the dimensionality of the tensors as follows:
/// * If both tensors are 1-dimensional, the dot product is returned.
/// * If both arguments are 2-dimensional, the matrix-matrix product is returned.
/// * If the first argument is 1-dimensional and the second argument is 2-dimensional, a 1 is prepended to its dimension for the purpose of the matrix multiply. After the matrix multiply, the prepended dimension is removed.
/// * If the first argument is 2-dimensional and the second argument is 1-dimensional, the matrix-vector product is returned.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - the first tensor to be multiplied
/// * `other`(`@Tensor<T>`) - the second tensor to be multiplied
///
/// ## Panics
///
/// * Panics if the dimension of the tensors is higher than two.
///
/// ## Returns
///
/// A new `Tensor<T>` resulting from the matrix multiplication.
///
/// ## Examples
///
/// Case 1: Dot product of two vectors (1D \* 1D)
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn dot_product_example() -> Tensor<u32> {
/// let tensor_1 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `matmul` function as follows.
/// return tensor_1.matmul(@tensor_2);
/// }
/// >>> [5]
/// ```
///
/// Case 2: Matrix multiplication (2D \* 2D)
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn matrix_mul_example() -> Tensor<u32> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(), data: array![244, 99, 109, 162].span()
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(), data: array![151, 68, 121, 170].span()
/// );
///
/// // We can call `matmul` function as follows.
/// return tensor_1.matmul(@tensor_2);
/// }
/// >>> [[48823, 33422],[36061, 34952]]
/// ```
///
/// Case 3: Matrix-Vector multiplication (2D x 1D)
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn matrix_vec_mul_example() -> Tensor<u32> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `matmul` function as follows.
/// return tensor_1.matmul(@tensor_2);
/// }
/// >>> [5,14,23]
/// ```
///
fn matmul(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// # tensor.exp
///
/// ```rust
/// fn exp(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the exponential of all elements of the input tensor.
/// $$
/// y_i=e^{x_i}
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the exponential of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn exp_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false),
    ///         ].span()
/// );
///
/// // We can call `exp` function as follows.
/// return tensor.exp();
/// }
/// >>> [[8388608,22802594],[61983844,168489688]]
/// // The fixed point representation of
/// // [[1, 2.718281],[7.38905, 20.085536]]
/// ```
///
fn exp(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.log
///
/// ```rust
/// fn log(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the natural log of all elements of the input tensor.
/// $$
/// y_i=log({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the natural log of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn log_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
    ///             FixedTrait::new_unscaled(1, false),
    ///             FixedTrait::new_unscaled(2, false),
    ///             FixedTrait::new_unscaled(3, false),
    ///             FixedTrait::new_unscaled(100, false),
    ///         ].span()
/// );
///
/// // We can call `log` function as follows.
/// return tensor.log();
/// }
    /// >>> [[0, 5814538], [9215825, 38630966]]
    /// // The fixed point representation of
    /// // [[0, 0.693147], [1.098612, 4.605170]]
/// ```
///
fn log(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.equal
///
/// ```rust
/// fn equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Check if two tensors are equal element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be equated
/// * `other`(`@Tensor<T>`) - The second tensor to be equated
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<usize>` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn eq_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
    ///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// // We can call `equal` function as follows.
/// return tensor_1.equal(@tensor_2);
/// }
/// >>> [1,1,1,1,1,1,0,0,0]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn eq_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `equal` function as follows.
/// return tensor_1.equal(@tensor_2);
/// }
/// >>> [1,1,1,0,0,0,0,0,0]
/// ```
///
fn equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// #tensor.greater
///
/// ```rust
/// fn greater(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Check if each element of the first tensor is greater than the corresponding element of the second tensor.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<usize>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn greater_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// // We can call `greater` function as follows.
/// return tensor_1.greater(@tensor_2);
/// }
/// >>> [0,0,0,0,0,0,0,1,1]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn greater_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `greater` function as follows.
/// return tensor_1.greater(@tensor_2);
/// }
/// >>> [0,0,0,1,1,1,1,1,1]
/// ```
///
fn greater(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// #tensor.greater_equal
///
/// ```rust
/// fn greater_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<usize>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn greater_equal_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// // We can call `greater_equal` function as follows.
/// return tensor_1.greater_equal(@tensor_2);
/// }
/// >>> [1,1,1,1,1,1,0,1,1]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn greater_equal_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `greater_equal` function as follows.
/// return tensor_1.greater_equal(@tensor_2);
/// }
/// >>> [1,1,1,1,1,1,1,1,1]
/// ```
///
fn greater_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// #tensor.less
///
/// ```rust
/// fn less(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
/// ```
///
/// Check if each element of the first tensor is less than the corresponding element of the second tensor.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<i32>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn less_example() -> Tensor<i32> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// // We can call `less` function as follows.
/// return tensor_1.less(@tensor_2);
/// }
/// >>> [0,0,0,0,0,0,1,0,0]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn less_example() -> Tensor<i32> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `less` function as follows.
/// return tensor_1.less(@tensor_2);
/// }
/// >>> [0,0,0,0,0,0,0,0,0]
/// ```
///
fn less(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
/// #tensor.less_equal
///
/// ```rust
/// fn less_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
/// ```
///
/// Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<i32>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn less_equal_example() -> Tensor<i32> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// // We can call `less_equal` function as follows.
/// return tensor_1.less_equal(@tensor_2);
/// }
/// >>> [1,1,1,1,1,1,1,0,0]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn less_equal_example() -> Tensor<i32> {
/// let tensor_1 = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 1, 2].span(),);
///
/// // We can call `less_equal` function as follows.
/// return tensor_1.less_equal(@tensor_2);
/// }
/// >>> [1,1,1,0,0,0,0,0,0]
/// ```
///
fn less_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
/// #tensor.abs
///
/// ```rust
/// fn abs(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the absolute value of all elements in the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the absolute value of all elements in the input tensor.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
///
/// fn abs_example() -> Tensor<i32> {
/// let tensor = TensorTrait::new(
/// shape: array![3].span(),
/// data: array![
/// -1, -2, 3
/// ]
/// .span(),
/// );
///
/// return tensor.abs();
/// }
/// >>> [1, 2, 3]
/// ```
///
fn abs(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.neg
///
/// ```rust
/// fn neg(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the negation of all elements in the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the negation of all elements in the input tensor.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
///
/// fn neg_example() -> Tensor<i32> {
/// let tensor = TensorTrait::new(
/// shape: array![3].span(),
/// data: array![
/// -1, -2, 3
/// ]
/// .span(),
/// );
///
/// return tensor.neg();
/// }
/// >>> [1, 2, -3]
/// ```
///
fn neg(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.ceil
///
/// ```rust
/// fn ceil(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Rounds up the value of each element in the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the rounded up value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn ceil_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::new(
/// shape: array![3].span(),
/// data: array![
/// FixedTrait::new(29998, false), // 0.003576
/// FixedTrait::new(100663252, false), // 11.9999947548
/// FixedTrait::new(100663252, true) // -11.9999947548
/// ]
/// .span(),
/// );
///
/// return tensor.ceil();
/// }
/// >>> [1,12,-11]
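/// // i.e. ceil(0.003576) = 1, ceil(11.9999947548) = 12, ceil(-11.9999947548) = -11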
/// ```
///
fn ceil(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.sin
///
/// ```rust
/// fn sin(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the sine of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the sine value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn sin_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![3].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false)
/// ]
/// .span(),
/// );
///
/// return tensor.sin();
/// }
/// >>> [0,7058770,7627740]
/// // The fixed point representation of
/// // [0,0.8414...,0.9092...]
/// ```
///
fn sin(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.cos
///
/// ```rust
/// fn cos(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the cosine of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the cosine value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn cos_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![3].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false)
/// ]
/// .span(),
/// );
///
/// return tensor.cos();
/// }
/// >>> [8388608,4532384,-3490893]
/// // The fixed point representation of
/// // [1, 0.5403...,-0.4161]
/// ```
///
fn cos(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.cumsum
///
/// ```rust
/// fn cumsum(self: @Tensor<T>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>) -> Tensor<T>;
/// ```
///
/// Performs cumulative sum of the input elements along the given axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The axis along which to compute the cumulative sum.
/// * `exclusive`(`Option<bool>`) - If true, the current element is excluded from the sum (the first output element is 0). Defaults to false, i.e. the sum is inclusive and the first element is copied as is.
/// * `reverse`(`Option<bool>`) - If true, the cumulative sum is performed in the opposite direction. Defaults to false.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance containing the cumulative sum of the input tensor's elements along the given axis.
///
/// ## Examples
///
/// Case 1: cumsum with default parameters
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn cumsum_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.cumsum(axis: 2, exclusive: Option::None(()), reverse: Option::None(()));
/// }
/// >>> [[[0,1],[2,5]],[[4,9],[6,13]]]
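/// // Along the last axis, each pair [x0, x1] becomes [x0, x0 + x1].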
/// ```
///
/// Case 2: cumsum with exclusive = true
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn cumsum_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.cumsum(axis: 2, exclusive: Option::Some(true), reverse: Option::None(()));
/// }
/// >>> [[[0,0],[0,2]],[[0,4],[0,6]]]
/// ```
///
/// Case 3: cumsum with exclusive = true and reverse = true
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn cumsum_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.cumsum(axis: 2, exclusive: Option::Some(true), reverse: Option::Some(true));
/// }
/// >>> [[[1,0],[3,0]],[[5,0],[7,0]]]
/// ```
///
fn cumsum(
self: @Tensor<T>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<T>;
/// # tensor.flatten
///
/// ```rust
/// fn flatten(self: @Tensor<T>, axis: usize) -> Tensor<T>;
/// ```
///
/// Flattens the input tensor into a 2D tensor.
/// If the input tensor has shape (d_0, d_1, ..., d_n) then the output will have shape
/// (d_0 * d_1 * ... * d_(axis-1), d_axis * d_(axis+1) * ... * d_n).
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - Indicates up to which input dimension (exclusive) the leading dimensions should be flattened.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance containing the flattened version of the input tensor.
///
/// ## Examples
///
/// Case 1: flatten with axis 0
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn flatten_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
///     );
///
///     return tensor.flatten(0); // equivalent to tensor.reshape(1,8)
/// }
/// >>> [[0,1,2,3,4,5,6,7]]
/// ```
///
/// Case 2: flatten with axis 1
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn flatten_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.flatten(1); // equivalent to tensor.reshape(2,4)
/// }
/// >>> [[0,1,2,3],[4,5,6,7]]
/// ```
///
/// Case 3: flatten with axis 2
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn flatten_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.flatten(2); // equivalent to tensor.reshape(4,2)
/// }
/// >>> [[0,1],[2,3],[4,5],[6,7]]
/// ```
///
fn flatten(self: @Tensor<T>, axis: usize) -> Tensor<T>;
/// # tensor.sinh
///
/// ```rust
/// fn sinh(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the hyperbolic sine of all elements of the input tensor.
/// $$
/// y_i=sinh({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the hyperbolic sine of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn sinh_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false)
/// ]
/// .span(),
/// );
///
/// return tensor.sinh();
/// }
/// >>> [[0,9858303],[30424311,84036026]]
/// // The fixed point representation of
/// // [[0, 1.175201],[3.62686, 10.0178749]]
/// ```
///
fn sinh(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.tanh
///
/// ```rust
/// fn tanh(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the hyperbolic tangent of all elements of the input tensor.
/// $$
/// y_i=tanh({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the hyperbolic tangent of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn tanh_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false)
/// ]
/// .span(),
/// );
///
/// return tensor.tanh();
/// }
/// >>> [[0,6388715],[8086850,8347125]]
/// // The fixed point representation of
/// // [[0, 0.761594],[0.96403, 0.9951]]
/// ```
///
fn tanh(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.cosh
///
/// ```rust
/// fn cosh(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the hyperbolic cosine of all elements of the input tensor.
/// $$
/// y_i=cosh({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the hyperbolic cosine of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn cosh_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false)
/// ]
/// .span(),
/// );
///
/// return tensor.cosh();
/// }
/// >>> [[8388608,12944299],[31559585,84453670]]
/// // The fixed point representation of
/// // [[1, 1.54308],[3.762196, 10.067662]]
/// ```
///
fn cosh(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.asinh
///
/// ```rust
/// fn asinh(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the inverse hyperbolic sine of all elements of the input tensor.
/// $$
/// y_i=asinh({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the inverse hyperbolic sine of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn asinh_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false)
/// ]
/// .span(),
/// );
///
/// return tensor.asinh();
/// }
/// >>> [[0,7393498],[12110093,15254235]]
/// // The fixed point representation of
/// // [[0, 0.8814],[1.44364, 1.8185]]
/// ```
///
fn asinh(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.acosh
///
/// ```rust
/// fn acosh(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the inverse hyperbolic cosine of all elements of the input tensor.
/// $$
/// y_i=acosh({x_i})
/// $$
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// Returns a new tensor in `T` with the inverse hyperbolic cosine of the elements of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn acosh_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false),
/// FixedTrait::new_unscaled(4, false)
/// ]
/// .span(),
/// );
///
/// return tensor.acosh();
/// }
/// >>> [[0,11047444],[14786996,17309365]]
/// // The fixed point representation of
/// // [[0, 1.31696],[1.76275, 2.06344]]
/// ```
///
fn acosh(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.atan
///
/// ```rust
/// fn atan(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the arctangent (inverse of tangent) of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the arctangent (inverse of tangent) value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn atan_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![3].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// ]
/// .span(),
/// );
///
/// return tensor.atan();
/// }
/// >>> [0,6588397,9287028]
/// // The fixed point representation of
/// // [0,0.7853...,1.1071...]
/// ```
///
fn atan(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.asin
///
/// ```rust
/// fn asin(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the arcsine (inverse of sine) of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the arcsine value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn asin_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2].span(),
/// data: array![FixedTrait::new_unscaled(0, false), FixedTrait::new_unscaled(1, false),]
/// .span(),
/// );
///
/// return tensor.asin();
/// }
/// >>> [0, 13176794]
/// // The fixed point representation of
/// // [0, 1.5707...]
/// ```
///
fn asin(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.or
///
/// ```rust
/// fn or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Computes the logical OR of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<usize>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn or_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// return tensor_1.or(@tensor_2);
/// }
/// >>> [0,1,1,1,1,1,1,1,1]
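/// // Only 0 OR 0 yields 0; any non-zero operand yields 1.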
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn or_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![1, 3].span(), data: array![0, 1, 2].span(),
/// );
///
/// return tensor_1.or(@tensor_2);
/// }
/// >>> [0,1,1,1,1,1,1,1,1]
/// ```
///
fn or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// #tensor.xor
///
/// ```rust
/// fn xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Computes the logical XOR of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<usize>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn xor_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 9, 1, 5].span(),
/// );
///
/// return tensor_1.xor(@tensor_2);
/// }
/// >>> [0,0,0,0,0,0,0,0,0]
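/// // Each pair is either both zero or both non-zero, so the logical XOR is 0 everywhere.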
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn xor_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![1, 3].span(), data: array![0, 1, 2].span(),
/// );
///
/// return tensor_1.xor(@tensor_2);
/// }
/// >>> [0,0,0,1,0,0,1,0,0]
/// ```
///
fn xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
/// #tensor.acos
///
/// ```rust
/// fn acos(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the arccosine (inverse of cosine) of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the arccosine value of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn acos_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2].span(),
/// data: array![FixedTrait::new_unscaled(0, false), FixedTrait::new_unscaled(1, false),]
/// .span(),
/// );
///
/// return tensor.acos();
/// }
/// >>> [13176794, 0]
/// // The fixed point representation of
/// // [1.5707..., 0]
/// ```
///
fn acos(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.onehot
///
/// ```rust
/// fn onehot(self: @Tensor<T>, depth: usize, axis: Option<usize>, values: Span<usize>) -> Tensor<usize>;
/// ```
///
/// Produces a one-hot tensor based on the input.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `depth`(`usize`) - The number of classes in the one-hot tensor.
/// * `axis`(`Option<usize>`) - Axis along which the one-hot representation is added. Default: axis=-1.
/// * `values`(`Span<usize>`) - Rank 1 tensor containing exactly two elements, in the format [off_value, on_value]
///
/// ## Panics
///
/// * Panics if `values` does not contain exactly two elements.
///
/// ## Returns
///
/// A new `Tensor<T>` one-hot encode of the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FP8x23, FixedTrait};
///
/// fn onehot_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2,2].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// FixedTrait::new_unscaled(3, false),
/// ]
/// .span(),
/// );
///
/// return tensor.onehot(depth: 3, axis: Option::None(()), values: array![0, 1].span());
/// }
/// >>> [[1. 0. 0.]
/// [0. 1. 0.]
/// [0. 0. 1.]]
/// ```
///
fn onehot(
self: @Tensor<T>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<T>;
/// #tensor.sqrt
///
/// ```rust
/// fn sqrt(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the square root of all elements of the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the square root of all elements in the input tensor.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn sqrt_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![3].span(),
/// data: array![
/// FixedTrait::new_unscaled(0, false),
/// FixedTrait::new_unscaled(1, false),
/// FixedTrait::new_unscaled(2, false),
/// ]
/// .span(),
/// );
///
/// return tensor.sqrt();
/// }
/// >>> [0,8388608,11863169]
/// // The fixed point representation of
/// // [0,1,1.4142...]
/// ```
///
fn sqrt(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.concat
///
/// ```rust
/// fn concat(tensors: Span<Tensor<T>>, axis: usize) -> Tensor<T>;
/// ```
///
/// Concatenate a list of tensors into a single tensor.
///
/// ## Args
///
/// * `tensors`(` Span<Tensor<T>>,`) - Array of the input tensors.
/// * `axis`(`usize`) - Axis to concat on.
///
/// ## Panics
///
/// * Panics if the tensor list length is not greater than 1.
/// * Panics if the tensors' dimension is not greater than the axis.
///
/// ## Returns
///
/// A new `Tensor<T>` concatenated tensor of the input tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn concat_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let result = TensorTrait::concat(tensors: array![tensor1, tensor2].span(), axis: 0);
/// return result;
/// }
/// >>> [[0. 1.]
/// [2. 3.],
/// [0. 1.]
/// [2. 3.]]
///
/// result.shape
/// >>> (4, 2)
///
/// let result = TensorTrait::concat(tensors: array![tensor1, tensor2].span(), axis: 1);
/// return result;
/// }
/// >>> [[0. 1., 0., 1.]
/// [2. 3., 2., 3.]]
///
/// result.shape
/// >>> (2, 4)
/// ```
///
fn concat(tensors: Span<Tensor<T>>, axis: usize,) -> Tensor<T>;
/// # tensor.quantize_linear
///
/// ```rust
/// fn quantize_linear(self: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>) -> Tensor::<Q>;
/// ```
///
/// Quantizes a Tensor using linear quantization.
///
/// The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point
/// to compute the low precision / quantized tensor. The scale factor and zero point must have same shape,
/// and can be either a scalar for per-tensor / per layer quantization, or a 1-D tensor for per-axis quantization.
/// The quantization formula is `y = saturate((x / y_scale) + y_zero_point)`. Saturation clamps the result to `[-128, 127]`.
/// The division `(x / y_scale)` is rounded to the nearest integer, with ties rounding to the nearest even integer.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `y_scale`(`@Tensor<T>`) - Scale for doing quantization to get `y`.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for doing quantization to get `y`.
///
/// ## Returns
///
/// A new `Tensor<Q>` with the same shape as the input tensor, containing the quantized values.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor};
///
/// fn quantize_linear_example() -> Tensor<i8> {
/// // We instantiate a 1D Tensor here.
/// let x = TensorTrait::<i32>::new(
/// shape: array![6].span(),
///         data: array![0, 2, 3, 1000, -254, -1000].span(),
/// );
///
/// // We instantiate the y_scale here.
/// let y_scale = TensorTrait::<i32>::new(
/// shape: array![1].span(), data: array![2].span(),
/// );
///
/// // We instantiate the y_zero_point here.
/// let y_zero_point = TensorTrait::<i32>::new(
/// shape: array![1].span(), data: array![1].span(),
/// );
///
/// return x.quantize_linear(@y_scale, @y_zero_point);
/// }
/// >>> [1, 2, 2, 127, -126, -128]
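/// // Following the formula above: 3 / 2 + 1 = 2.5 rounds to 2 (ties to even);
/// // 1000 / 2 + 1 = 501 saturates to 127; -1000 / 2 + 1 = -499 saturates to -128.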
/// ```
///
fn quantize_linear(
self: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
/// # tensor.dequantize_linear
///
/// ```rust
/// fn dequantize_linear(self: @Tensor<Q>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>) -> Tensor::<T>;
/// ```
///
/// Dequantizes a Tensor using linear dequantization.
///
/// The linear dequantization operator. It consumes a quantized tensor, a scale, and a zero point to compute
/// the full precision tensor. The dequantization formula is y = (x - x_zero_point) * x_scale. x_scale and
/// x_zero_point must have same shape, and can be either a scalar for per-tensor / per layer quantization,
/// or a 1-D tensor for per-axis quantization.
///
/// ## Args
///
/// * `self`(`@Tensor<Q>`) - The input tensor.
/// * `x_scale`(`@Tensor<T>`) - Scale for input `x`.
/// * `x_zero_point`(`@Tensor<T>`) - Zero point for input `x`.
///
/// ## Returns
///
/// A new `Tensor<T>` with the same shape as the input tensor, containing the dequantized values.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor};
///
/// fn dequantize_linear_example() -> Tensor<i32> {
/// // We instantiate a 1D Tensor here.
/// let x = TensorTrait::<i8>::new(
/// shape: array![4].span(),
/// data: array![0, 3, 125, 127].span(),
/// );
///
/// // We instantiate the x_scale here.
/// let x_scale = TensorTrait::<i32>::new(
/// shape: array![1].span(), data: array![2].span(),
/// );
///
/// // We instantiate the x_zero_point here.
/// let x_zero_point = TensorTrait::<i32>::new(
/// shape: array![1].span(), data: array![0].span(),
/// );
///
/// return x.dequantize_linear(@x_scale, @x_zero_point);
/// }
/// >>> [0, 6, 250, 254]
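/// // Following the formula above: (3 - 0) * 2 = 6; (127 - 0) * 2 = 254.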
/// ```
///
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T>;
/// # tensor.qlinear_add
///
/// ```rust
/// fn qlinear_add(self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, b: @Tensor<i8>, b_scale: @Tensor<T>, b_zero_point: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>) -> Tensor::<i8>;
/// ```
///
/// Performs the sum of quantized Tensors
///
/// It consumes two quantized input tensors with their scales and zero points, plus the scale and zero point of the output, and computes the quantized output.
/// The quantization formula is y = saturate((x / y_scale) + y_zero_point).
/// It adds the two tensors once dequantized, then returns the quantization of the result of the addition.
/// Broadcasting is supported.
/// Scale and zero point must have the same shape and the same type. They must be either scalar (per-tensor quantization) or N-D tensor (per row for 'a' and per column for 'b', i.e. per-axis quantization).
///
/// ## Args
///
/// * `self`(`@Tensor<i8>`) - The first tensor to be added (a).
/// * `a_scale`(`@Tensor<T>`) - Scale for input `a`.
/// * `a_zero_point`(`@Tensor<T>`) - Zero point for input `a`.
/// * `b`(`@Tensor<i8>`) - The second tensor to be added (b).
/// * `b_scale`(`@Tensor<T>`) - Scale for input `b`.
/// * `b_zero_point`(`@Tensor<T>`) - Zero point for input `b`.
/// * `y_scale`(`@Tensor<T>`) - Scale for output.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for output.
///
/// ## Returns
///
/// A new `Tensor<i8>`, containing the quantized result of the addition of the dequantized inputs.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
///
///
/// fn qlinear_add_example() -> Tensor<i8> {
/// let a = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 3].span(),
/// data: array![6, 6, 6, 11, 11, 11].span(),
/// );
///
/// // As the operator supports broadcasting shapes [1, 3] and [2, 3] are compatible
/// let b = TensorTrait::<i8>::new(
/// shape: array![1, 3].span(),
/// data: array![40, 40, 40].span(),
/// );
///
/// let a_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(131072, false)].span(),);
/// let a_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(65536, false)].span(),);
/// let b_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(16384, false)].span(),);
/// let b_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(0, false)].span(),);
///
/// let y_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(655360, false)].span(),);
/// let y_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(65536, true)].span(),);
///
/// return a
/// .qlinear_add(
/// @a_scale, @a_zero_point, @b, @b_scale, @b_zero_point, @y_scale, @y_zero_point
/// );
/// }
///
/// >>> [[1, 1, 1], [2, 2, 2]]
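/// // Dequantized inputs: a = (6 - 1) * 2 = 10 and (11 - 1) * 2 = 20; b = (40 - 0) * 0.25 = 10.
/// // The sums 20 and 30 requantize to 20 / 10 + (-1) = 1 and 30 / 10 + (-1) = 2.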
/// ```
///
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
/// # tensor.qlinear_mul
///
/// ```rust
/// fn qlinear_mul(self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, b: @Tensor<i8>, b_scale: @Tensor<T>, b_zero_point: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>) -> Tensor::<i8>;
/// ```
///
/// Performs the element-wise multiplication of quantized Tensors
///
/// It consumes two quantized input tensors with their scales and zero points, plus the scale and zero point of the output, and computes the quantized output.
/// The quantization formula is y = saturate((x / y_scale) + y_zero_point).
/// It multiplies the two tensors element-wise once dequantized, then returns the quantization of the result of the multiplication.
/// Broadcasting is supported.
/// Scale and zero point must have the same shape and the same type. They must be either scalar (per-tensor quantization) or N-D tensor (per row for 'a' and per column for 'b', i.e. per-axis quantization).
///
/// ## Args
///
/// * `self`(`@Tensor<i8>`) - The first tensor to be multiplied (a).
/// * `a_scale`(`@Tensor<T>`) - Scale for input `a`.
/// * `a_zero_point`(`@Tensor<T>`) - Zero point for input `a`.
/// * `b`(`@Tensor<i8>`) - The second tensor to be multiplied
/// * `b_scale`(`@Tensor<T>`) - Scale for input `b`.
/// * `b_zero_point`(`@Tensor<T>`) - Zero point for input `b`.
/// * `y_scale`(`@Tensor<T>`) - Scale for output.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for output.
///
/// ## Returns
///
/// A new `Tensor<i8>`, containing the quantized result of the element-wise multiplication of the dequantized inputs.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
///
/// ## Example
///
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
///
/// fn qlinear_mul_example() -> Tensor<i8> {
/// let a = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 3].span(),
/// data: array![21, 21, 21, 41, 41, 41]
/// .span(),
/// );
/// let b = TensorTrait::<
/// i8
/// >::new(
/// shape: array![1, 3].span(),
/// data: array![4, 8, 12].span(),
/// );
///
/// let a_scale = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(131072, false)].span(),
/// );
/// let a_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(65536, false)].span(),);
/// let b_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(16384, false)].span(),);
/// let b_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(0, false)].span(),);
///
/// let y_scale = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(393216, false)].span(),
/// );
/// let y_zero_point = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(655360, false)].span(),
/// );
///
///     return a
/// .qlinear_mul(
/// @a_scale, @a_zero_point, @b, @b_scale, @b_zero_point, @y_scale, @y_zero_point
/// );
///
/// }
///
/// >>> [[16, 23, 30], [23, 36, 50]]
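/// // Dequantized inputs: a = (21 - 1) * 2 = 40 and (41 - 1) * 2 = 80; b = [1, 2, 3].
/// // The products requantize as x / 6 + 10, e.g. 120 / 6 + 10 = 30 and 240 / 6 + 10 = 50.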
/// ```
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
/// # tensor.qlinear_matmul
///
/// ```rust
/// fn qlinear_matmul(self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, b: @Tensor<i8>, b_scale: @Tensor<T>, b_zero_point: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>) -> Tensor::<i8>;
/// ```
///
/// Multiplies quantized Tensors
///
/// It consumes two quantized input tensors with their scales and zero points, plus the scale and zero point of the output, and computes the quantized output.
/// The quantization formula is y = saturate((x / y_scale) + y_zero_point).
/// It performs the matrix multiplication of the two tensors once dequantized; if either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes.
/// It then returns the quantization of the result of the multiplication.
/// Scale and zero point must have the same shape and the same type. They must be either scalar (per-tensor quantization) or N-D tensor (per row for 'a' and per column for 'b', i.e. per-axis quantization).
///
/// ## Args
///
/// * `self`(`@Tensor<i8>`) - The first tensor to be multiplied (a).
/// * `a_scale`(`@Tensor<T>`) - Scale for input `a`.
/// * `a_zero_point`(`@Tensor<T>`) - Zero point for input `a`.
/// * `b`(`@Tensor<i8>`) - The second tensor to be multiplied
/// * `b_scale`(`@Tensor<T>`) - Scale for input `b`.
/// * `b_zero_point`(`@Tensor<T>`) - Zero point for input `b`.
/// * `y_scale`(`@Tensor<T>`) - Scale for output.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for output.
///
/// ## Returns
///
/// A new `Tensor<i8>`, containing the quantized result of the multiplication of the dequantized inputs.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
/// fn qlinear_matmul_example() -> Tensor<i8> {
/// let a = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 3].span(),
/// data: array![
/// 3,
/// 4,
/// 5,
/// 2,
/// 4,
/// 3
/// ]
/// .span(),
/// );
/// let b = TensorTrait::<
/// i8
/// >::new(
/// shape: array![3, 1].span(),
/// data: array![
/// 4,
/// 8,
/// 4
/// ]
/// .span(),
/// );
///
/// let a_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(131072, false)].span(),);
/// let a_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(65536, false)].span(),);
/// let b_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(16384, false)].span(),);
/// let b_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(0, false)].span(),);
///
/// let y_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(393216, false)].span(),);
/// let y_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(655360, false)].span(),);
///
/// return a
/// .qlinear_matmul(
/// @a_scale, @a_zero_point, @b, @b_scale, @b_zero_point, @y_scale, @y_zero_point
/// );
/// }
/// >>> [14, 13]
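/// // Dequantized inputs: a = [[4, 6, 8], [2, 6, 4]] and b = [[1], [2], [1]]; the matmul gives [24, 18],
/// // which requantizes to 24 / 6 + 10 = 14 and 18 / 6 + 10 = 13.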
/// ```
///
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
/// # tensor.qlinear_concat
///
/// ```rust
/// fn qlinear_concat(tensors: Span<Tensor<i8>>, scales: Span<Tensor<T>>, zero_points: Span<Tensor<T>>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>, axis: usize) -> Tensor::<i8>;
/// ```
///
/// Concatenates a list of tensors after dequantizing them with their respective scales and zero points, and returns the quantized result.
///
/// ## Args
///
/// * `tensors`(` Span<Tensor<i8>>,`) - Array of the quantized input tensors.
/// * `scales`(` Span<Tensor<T>>,`) - Array of the scales of the quantized input tensors.
/// * `zero_points`(` Span<Tensor<T>>,`) - Array of the zero_points of the quantized input tensors.
/// * `y_scale`(`@Tensor<T>`) - Scale for output.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for output.
/// * `axis`(`usize`) - Axis to concat on.
///
/// ## Panics
///
/// * Panics if the tensor list length is not greater than 1.
/// * Panics if the tensors' dimension is not greater than the axis.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
///
/// ## Returns
///
/// A new `Tensor<i8>` concatenated quantized tensor of the dequantized input tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
///
/// fn qlinear_concat_example() -> Tensor<i8> {
/// let tensor1 = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 2].span(),
/// data: array![
/// 5,
/// 5,
/// 5,
/// 5,
/// ]
/// .span(),
/// );
/// let tensor2 = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 2].span(),
/// data: array![
/// 1,
/// 1,
/// 1,
/// 1,
/// ]
/// .span(),
/// );
///
/// let tensors = array![tensor1, tensor2].span();
///
/// let tensor1_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(131072, false)].span(),);
/// let tensor2_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(262144, false)].span(),);
///
/// let scales = array![tensor1_scale, tensor2_scale].span();
///
/// let tensor1_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(327680, false)].span(),);
/// let tensor2_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(0, false)].span(),);
///
/// let zero_points = array![tensor1_zero_point, tensor2_zero_point].span();
///
/// let y_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(262144, false)].span(),);
///
/// let y_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(65536, false)].span(),);
///
/// return TensorTrait::qlinear_concat(tensors, scales, zero_points, @y_scale, @y_zero_point, 0);
/// }
///
/// >>> [[1, 1, 1, 1], [2, 2, 2, 2]]
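/// // Dequantized inputs: tensor1 = (5 - 5) * 2 = 0 and tensor2 = (1 - 0) * 4 = 4,
/// // which requantize to 0 / 4 + 1 = 1 and 4 / 4 + 1 = 2.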
/// ```
///
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<T>>,
zero_points: Span<Tensor<T>>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>,
axis: usize
) -> Tensor::<i8>;
/// # tensor.qlinear_leakyrelu
///
/// ```rust
/// fn qlinear_leakyrelu(self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, alpha: T) -> Tensor::<i8>;
/// ```
///
/// Applies the Leaky ReLU operator to a quantized Tensor
///
/// QLinearLeakyRelu takes as input a quantized Tensor, its scale and zero point, and a scalar alpha, and produces one output data (a quantized Tensor)
/// where the function `f(x) = alpha * x for x < 0, f(x) = x for x >= 0` is applied to the data tensor elementwise.
/// The quantization formula is y = saturate((x / y_scale) + y_zero_point).
/// Scale and zero point must have the same shape and the same type. They must be either scalar (per-tensor quantization) or N-D tensor (per-axis quantization).
///
/// ## Args
///
/// * `self`(`@Tensor<i8>`) - The input tensor (a).
/// * `a_scale`(`@Tensor<T>`) - Scale for input `a`.
/// * `a_zero_point`(`@Tensor<T>`) - Zero point for input `a`.
/// * `alpha`(`T`) - The factor multiplied to negative elements.
///
/// ## Returns
///
/// A new `Tensor<i8>`, containing result of the Leaky Relu.
///
/// ## Type Constraints
///
/// u32 tensor, not supported.
/// fp8x23wide tensor, not supported.
/// fp16x16wide tensor, not supported.
/// bool tensor, not supported.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
///
///
/// fn qlinear_leakyrelu_example() -> Tensor<i8> {
/// let a = TensorTrait::<
/// i8
/// >::new(
/// shape: array![2, 3].span(),
/// data: array![
/// -10,
/// -10,
/// -10,
/// 10,
/// 10,
/// 10
/// ]
/// .span(),
/// );
///
/// let a_scale = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(327680, false)].span(),);
/// let a_zero_point = TensorTrait::<
/// FP16x16
/// >::new(shape: array![1].span(), data: array![FixedTrait::<FP16x16>::new(131072, false)].span(),);
///
/// let alpha = FixedTrait::<FP16x16>::new(655360, false);
///
///     return a
/// .qlinear_leakyrelu(
/// @a_scale, @a_zero_point, alpha
/// );
/// }
///
/// >>> [[-118, -118, -118], [10, 10, 10]]
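/// // Dequantized input: (-10 - 2) * 5 = -60 and (10 - 2) * 5 = 40; applying alpha = 10 gives -600 and 40,
/// // which requantize to -600 / 5 + 2 = -118 and 40 / 5 + 2 = 10.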
/// ```
///
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, alpha: T
) -> Tensor::<i8>;
/// # tensor.slice
///
/// ```rust
/// fn slice(self: @Tensor<T>, starts: Span<usize>, ends: Span<usize>, axes: Option<Span<usize>>, steps: Option<Span<usize>>) -> Tensor<T>;
/// ```
///
/// Produces a slice of the input tensor along multiple axes.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data to extract slices from.
/// * `starts`(Span<usize>) - 1-D tensor of starting indices of corresponding axis in `axes`
/// * `ends`(Span<usize>) - 1-D tensor of ending indices (exclusive) of corresponding axis in `axes`
/// * `axes`(Option<Span<usize>>) - 1-D tensor of axes that `starts` and `ends` apply to.
/// * `steps`(Option<Span<usize>>) - 1-D tensor of slice step of corresponding axis in `axes`.
///
/// ## Panics
///
/// * Panics if the length of starts is not equal to the length of ends.
/// * Panics if the length of starts is not equal to the length of axes.
/// * Panics if the length of starts is not equal to the length of steps.
///
/// ## Returns
///
/// A new `Tensor<T>` slice of the input tensor.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn slice_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 4].span(),
/// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.slice(
/// starts: array![0, 2].span(),
/// ends: array![2, 4].span(),
///         axes: Option::None(()),
/// steps: Option::Some(array![1, 1].span())
/// );
/// }
/// >>> [[2 3]
/// [6 7]]
/// ```
///
fn slice(
self: @Tensor<T>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<T>;
/// # tensor.nonzero
///
/// ```rust
/// fn nonzero(self: @Tensor<T>) -> Tensor<usize>;
/// ```
///
/// Produces indices of the elements that are non-zero (in row-major order - by dimension).
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data to calculate non-zero indices.
///
/// ## Returns
///
/// A new `Tensor<usize>` indices of the elements that are non-zero (in row-major order - by dimension).
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn nonzero_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 4].span(),
/// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.nonzero();
/// }
/// >>> [[0 0 0 1 1 1 1]
/// [1 2 3 0 1 2 3]]
/// ```
///
fn nonzero(self: @Tensor<T>) -> Tensor<usize>;
/// # tensor.gather
///
/// ```rust
/// fn gather(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
/// ```
///
/// Gather entries of the axis dimension of data.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Tensor<i32>`) - Tensor of indices.
/// * `axis`(`Option<i32>`) - Axis to gather on. Default: axis=0.
///
/// ## Panics
///
/// * Panics if index values are not within bounds [-s, s-1] along axis of size s.
///
/// ## Returns
///
/// A new `Tensor<T>` .
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn gather_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![2, 3].span(),
///         data: array![1, 2, 3, 4, 5, 6].span(),
///     );
///     let indices = TensorTrait::<i32>::new(
///         shape: array![2].span(),
///         data: array![1, 0].span(),
///     );
///
/// return tensor.gather(
/// indices: indices,
/// axis: Option::None(()),
/// );
/// }
/// >>> [[4 5 6]
///      [1 2 3]]
/// ```
///
fn gather(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
/// # tensor.unsqueeze
///
/// ```rust
/// fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
/// ```
///
/// Inserts single-dimensional entries into the shape of the input tensor (data). Takes one required input `axes`,
/// a list of dimension indices; the operator inserts a dimension of size 1 at each listed index of the
/// output tensor (expanded).
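///
/// For instance, an input of shape [2, 4] unsqueezed at axes [0, 3] (as in the example below) produces a tensor of shape [1, 2, 4, 1].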
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data to unsqueeze.
/// * `axes`(`Span<usize>`) - List of integers indicating the dimensions to be inserted.
///
/// ## Panics
///
/// * Panics if the given axes have duplicate elements.
/// * Panics if one of the given axes is invalid.
///
/// ## Returns
///
/// Reshaped `Tensor<T>` with same data as input.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn unsqueeze_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 4].span(),
/// data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// return tensor.unsqueeze(
/// axes: array![0, 3].span(),
/// );
/// }
/// >>> [[[[0]
/// [1]
/// [2]
/// [3]]
///
/// [[4]
/// [5]
/// [6]
/// [7]]]]
/// ```
///
fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
/// # tensor.squeeze
///
/// ```rust
/// fn squeeze(self: @Tensor<T>, axes: Option<Span<u32>>) -> Tensor<T>;
/// ```
///
/// Removes dimensions of size 1 from the shape of a tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data to squeeze.
/// * `axes`(`Option<Span<i32>>`) - List of integers indicating the dimensions to squeeze.
///
/// ## Returns
///
/// A new `Tensor<T>` Reshaped tensor with same data as input.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn squeeze_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![1, 2, 1, 2, 1].span(),
/// data: array![1, 1, 1, 1].span(),
/// );
///
///     return tensor.squeeze(axes: Option::None(()));
/// }
/// >>> [[1 1]
/// [1 1]]
/// ```
///
fn squeeze(self: @Tensor<T>, axes: Option<Span<u32>>) -> Tensor<T>;
/// # tensor.clip
///
/// ```rust
/// fn clip(self: @Tensor<T>, min: Option<T>, max: Option<T>) -> Tensor<T>;
/// ```
///
/// Clip operator limits the given input within an interval.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Input tensor whose elements to be clipped.
/// * `min`(`Option<T>`) - Minimum value, under which element is replaced by min.
/// * `max`(`Option<T>`) - Maximum value, above which element is replaced by max.
///
/// ## Returns
///
/// Output `Tensor<T>` with clipped input elements.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn clip_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 3].span(),
///         data: array![1, 2, 3, 4, 5, 6].span(),
/// );
///
/// return tensor.clip(
/// min: Option::None(()),
/// max: Option::Some(3),
/// );
/// }
/// >>> [[1 2 3]
///      [3 3 3]]
/// ```
///
fn clip(self: @Tensor<T>, min: Option<T>, max: Option<T>) -> Tensor<T>;
/// # tensor.sign
///
/// ```rust
/// fn sign(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Calculates the sign of the given input tensor element-wise.
/// If input > 0, output 1. if input < 0, output -1. if input == 0, output 0.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data whose sign is computed element-wise.
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with the sign of the input tensor computed element-wise.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
///
/// fn sign_example() -> Tensor<i32> {
/// let tensor = TensorTrait::<i32>::new(
/// shape: array![11].span(),
/// data: array![-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5].span(),
/// );
///
/// return tensor.sign();
/// }
/// >>> [-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]
/// ```
///
fn sign(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.identity
///
/// ```rust
/// fn identity(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Return a Tensor with the same shape and contents as input.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Input tensor.
///
/// ## Returns
///
/// A new `Tensor<T>` to copy input into.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
///
/// fn identity_example() -> Tensor<i32> {
/// let tensor = TensorTrait::<i32>::new(
/// shape: array![2, 2].span(),
/// data: array![1, 2, 3, 4].span(),
/// );
/// let t_identity = tensor.identity();
/// t_identity
/// }
/// >>> [[1 2] [3 4]] // A Tensor with the same shape and contents as input
/// ```
///
fn identity(self: @Tensor<T>) -> Tensor<T>;
/// #tensor.and
///
/// ```rust
/// fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
/// ```
///
/// Computes the logical AND of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<bool>`) - The first tensor to be compared
/// * `other`(`@Tensor<bool>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<bool>` with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor};
///
/// fn and_example() -> Tensor<bool> {
///     let tensor_1 = TensorTrait::<bool>::new(
///         shape: array![3, 4].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(),
///     );
///
///     let tensor_2 = TensorTrait::<bool>::new(
///         shape: array![3, 4].span(), data: array![false, false, true, true, false, true, false, true, false, true, false, true].span(),
///     );
///
/// return tensor_1.and(@tensor_2);
/// }
/// >>> [false, false, false, false, false, true, false, false, false, false, false, true]
/// ```
///
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
/// #tensor.where
///
/// ```rust
/// fn where(self: @Tensor<T>, x: @Tensor<T>, y: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes a new tensor by selecting values from tensor x (resp. y) at
/// indices where the condition is 1 (resp. 0).
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The condition tensor
/// * `x`(`@Tensor<T>`) - The first input tensor
/// * `y`(`@Tensor<T>`) - The second input tensor
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// Return a new `Tensor<T>` of the same shape as the input with elements
/// chosen from x or y depending on the condition.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn where_example() -> Tensor<u32> {
/// let tensor_cond = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(), data: array![0, 1, 0, 1].span(),
/// );
///
/// let tensor_x = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(), data: array![2, 4, 6, 8].span(),
/// );
///
/// let tensor_y = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(), data: array![1, 3, 5, 9].span(),
/// );
///
///     return tensor_cond.where(@tensor_x, @tensor_y);
/// }
/// >>> [1,4,5,8]
/// ```
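/// Positions where the condition is nonzero take values from x (4 and 8); the remaining positions take values from y (1 and 5).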
///
fn where(self: @Tensor<T>, x: @Tensor<T>, y: @Tensor<T>) -> Tensor<T>;
/// #tensor.resize
///
/// ```rust
/// fn resize(
/// self: @Tensor<T>,
/// roi: Option<Tensor<T>>,
/// scales: Option<Span<T>>,
/// sizes: Option<Span<usize>>,
/// antialias: Option<usize>,
/// axes: Option<Span<usize>>,
/// coordinate_transformation_mode: Option<orion::operators::tensor::math::resize::TRANSFORMATION_MODE>,
/// cubic_coeff_a: Option<T>,
/// exclude_outside: Option<bool>,
/// extrapolation_value: Option<T>,
/// keep_aspect_ratio_policy: Option<orion::operators::tensor::math::resize::KEEP_ASPECT_RATIO_POLICY>,
/// mode: Option<orion::operators::tensor::math::resize::MODE>,
/// nearest_mode: Option<orion::operators::tensor::math::resize::NEAREST_MODE>,
/// ) -> Tensor<T>;
/// ```
///
/// Resizes the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood in the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `roi` (`Option<Tensor<T>>`) (optional) - 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. It only takes effect when coordinate_transformation_mode is "tf_crop_and_resize"
/// * `scales` (`Option<Span<T>>`) (optional) - The scale array along each dimension. It takes value greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X' or the length of 'axes', if provided. One and only one of 'scales' and 'sizes' MUST be specified.
/// * `sizes` (`Option<Span<usize>>`) (optional) - Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value. The number of elements of 'sizes' should be the same as the rank of input 'X', or the length of 'axes', if provided. One and only one of 'scales' and 'sizes' MUST be specified.
/// * `antialias` (`Option<usize>`) (default is 0) - If set to 1, "linear" and "cubic" interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale).
/// * `axes`(`Option<Span<usize>>`) - If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data).
/// * `coordinate_transformation_mode` (`Option<TRANSFORMATION_MODE>`) (default is half_pixel) - This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
/// * `cubic_coeff_a` (`Option<T>`) (default is -0.75) - The coefficient 'a' used in cubic interpolation.
/// * `exclude_outside` (`Option<bool>`) (default is false) - If set to true, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0.
/// * `extrapolation_value` (`Option<T>`) (default is 0.0) - When coordinate_transformation_mode is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value.
/// * `keep_aspect_ratio_policy` (`Option<KEEP_ASPECT_RATIO_POLICY>`) (default is stretch) - This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when the `scales` input is used.
/// * `mode` (`Option<MODE>`) (default is nearest) - Three interpolation modes: "nearest", "linear" and "cubic".
/// * `nearest_mode` (`Option<NEAREST_MODE>`) (default is round_prefer_floor) - Four modes: "round_prefer_floor" (as known as round half down), "round_prefer_ceil" (as known as round half up), "floor", "ceil". Only used by nearest interpolation.
///
/// ## Panics
///
/// * Panics if both scales and sizes are `Option::None`.
/// * Panics if roi is `Option::None` for the coordinate_transformation_mode `tf_crop_and_resize`.
/// * Panics if antialias is not `Option::None` for mode `nearest`.
///
/// ## Returns
///
/// A new resized `Tensor<T>` whose dimensions are given by output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if `scales` is specified, or by output_size if `sizes` is specified (note that some values of the parameter `keep_aspect_ratio_policy` can change `sizes` and therefore the dimension of the output tensor)
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor, FP16x16TensorPartialEq};
/// use orion::operators::tensor::math::resize::{
/// MODE, NEAREST_MODE, KEEP_ASPECT_RATIO_POLICY, TRANSFORMATION_MODE
/// };
/// use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
/// use core::debug::PrintTrait;
///
/// fn example_resize_downsample_scales_linear() -> Tensor<FP16x16>{
/// let mut data = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1, 1, 2, 4].span(),
/// data: array![
/// FixedTrait::<FP16x16>::new(65536, false), //1
/// FixedTrait::<FP16x16>::new(131072, false), //2
/// FixedTrait::<FP16x16>::new(196608, false), //3
/// FixedTrait::<FP16x16>::new(262144, false), //4
/// FixedTrait::<FP16x16>::new(327680, false), //5
/// FixedTrait::<FP16x16>::new(393216, false), //6
/// FixedTrait::<FP16x16>::new(458752, false), //7
/// FixedTrait::<FP16x16>::new(524288, false), //8
/// ]
/// .span(),
/// );
/// let mut scales = array![
/// FixedTrait::<FP16x16>::new(65536, false), //1
/// FixedTrait::<FP16x16>::new(65536, false),
/// FixedTrait::<FP16x16>::new(39322, false), //0.6
/// FixedTrait::<FP16x16>::new(39322, false)
/// ]
/// .span();
///
/// let scales = Option::Some(scales);
///
/// return data.resize(
/// Option::None,
/// scales,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::Some(MODE::LINEAR),
/// Option::None,
/// );
///
/// }
/// >>> [[[[2.6666665 4.3333331]]]]
///
///
///
/// fn example_resize_tf_crop_and_resize_extrapolation_value() -> Tensor<FP16x16> {
/// let mut data = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1, 1, 4, 4].span(),
/// data: array![
/// FixedTrait::<FP16x16>::new(65536, false),
/// FixedTrait::<FP16x16>::new(131072, false),
/// FixedTrait::<FP16x16>::new(196608, false),
/// FixedTrait::<FP16x16>::new(262144, false),
/// FixedTrait::<FP16x16>::new(327680, false),
/// FixedTrait::<FP16x16>::new(393216, false),
/// FixedTrait::<FP16x16>::new(458752, false),
/// FixedTrait::<FP16x16>::new(524288, false),
/// FixedTrait::<FP16x16>::new(589824, false),
/// FixedTrait::<FP16x16>::new(655360, false),
/// FixedTrait::<FP16x16>::new(720896, false),
/// FixedTrait::<FP16x16>::new(786432, false),
/// FixedTrait::<FP16x16>::new(851968, false),
/// FixedTrait::<FP16x16>::new(917504, false),
/// FixedTrait::<FP16x16>::new(983040, false),
/// FixedTrait::<FP16x16>::new(1048576, false),
/// ]
/// .span(),
/// );
///
/// let mut roi = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![8].span(),
/// data: array![
/// FixedTrait::<FP16x16>::new(0, false),
/// FixedTrait::<FP16x16>::new(0, false),
/// FixedTrait::<FP16x16>::new(26214, false),
/// FixedTrait::<FP16x16>::new(39322, false),
/// FixedTrait::<FP16x16>::new(65536, false),
/// FixedTrait::<FP16x16>::new(65536, false),
/// FixedTrait::<FP16x16>::new(78643, false),
/// FixedTrait::<FP16x16>::new(111411, false),
/// ]
/// .span(),
/// );
/// let roi = Option::Some(roi);
///
/// let mut sizes = array![1, 1, 3, 3].span();
/// let sizes = Option::Some(sizes);
///
/// let extrapolation_value = Option::Some(FixedTrait::<FP16x16>::new(655360, false));
///
/// return data.resize(
/// roi,
/// Option::None,
/// sizes,
/// Option::None,
/// Option::None,
/// Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),
/// Option::None,
/// Option::None,
/// extrapolation_value,
/// Option::None,
/// Option::Some(MODE::LINEAR),
/// Option::None,
/// );
///
/// }
/// >>> [[[[ 7.6000004 10. 10. ]
/// [12.400001 10. 10. ]
/// [10. 10. 10. ]]]]
///
///
///
/// fn example_resize_downsample_sizes_cubic_antialias() -> Tensor<FP16x16> {
/// let mut data = TensorTrait::<
/// FP16x16
/// >::new(
/// shape: array![1, 1, 4, 4].span(),
/// data: array![
/// FixedTrait::<FP16x16>::new(65536, false),
/// FixedTrait::<FP16x16>::new(131072, false),
/// FixedTrait::<FP16x16>::new(196608, false),
/// FixedTrait::<FP16x16>::new(262144, false),
/// FixedTrait::<FP16x16>::new(327680, false),
/// FixedTrait::<FP16x16>::new(393216, false),
/// FixedTrait::<FP16x16>::new(458752, false),
/// FixedTrait::<FP16x16>::new(524288, false),
/// FixedTrait::<FP16x16>::new(589824, false),
/// FixedTrait::<FP16x16>::new(655360, false),
/// FixedTrait::<FP16x16>::new(720896, false),
/// FixedTrait::<FP16x16>::new(786432, false),
/// FixedTrait::<FP16x16>::new(851968, false),
/// FixedTrait::<FP16x16>::new(917504, false),
/// FixedTrait::<FP16x16>::new(983040, false),
/// FixedTrait::<FP16x16>::new(1048576, false),
/// ]
/// .span(),
/// );
///
/// let antialias = Option::Some(1);
///
/// let mut sizes = array![1, 1, 3, 3].span();
/// let sizes = Option::Some(sizes);
///
/// return data.resize(
/// Option::None,
/// Option::None,
/// sizes,
/// antialias,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::None,
/// Option::Some(MODE::CUBIC),
/// Option::None,
/// );
/// }
///
/// >>> [[[[ 1.7750092 3.1200073 4.4650054]
/// [ 7.1550016 8.5 9.844998 ]
/// [12.534994 13.8799925 15.224991 ]]]]
///
/// ```
///
fn resize(
self: @Tensor<T>,
roi: Option<Tensor<T>>,
scales: Option<Span<T>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<
orion::operators::tensor::math::resize::TRANSFORMATION_MODE
>,
cubic_coeff_a: Option<T>,
exclude_outside: Option<bool>,
extrapolation_value: Option<T>,
keep_aspect_ratio_policy: Option<
orion::operators::tensor::math::resize::KEEP_ASPECT_RATIO_POLICY
>,
mode: Option<orion::operators::tensor::math::resize::MODE>,
nearest_mode: Option<orion::operators::tensor::math::resize::NEAREST_MODE>,
) -> Tensor<T>;
/// #tensor.round
///
/// ```rust
/// fn round(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the rounded value of all elements in the input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the rounded value of all elements in the input tensor.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn round_example() -> Tensor<FP16x16> {
/// let tensor = TensorTrait::<FP16x16>::new(
///         shape: array![1].span(),
/// data: array![
/// FixedTrait::new(190054, false), // 2.9
/// ]
/// .span(),
/// );
///
/// return tensor.round();
/// }
/// >>> [3]
/// ```
///
fn round(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.scatter
///
/// ```rust
/// fn scatter(self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, axis: Option<usize>, reduction: Option<usize>) -> Tensor<T>;
/// ```
///
/// Produces a copy of the input data, with values updated to those specified by updates at the index positions specified by indices.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `updates`(`Tensor<T>`) - The updates tensor.
/// * `indices`(`Tensor<usize>`) - Tensor of indices.
/// * `axis`(`Option<usize>`) - Axis to scatter on. Default: axis=0.
/// * `reduction`(`Option<usize>`) - Reduction operation. Default: reduction='none'.
///
/// ## Panics
///
/// * Panics if index values are not within bounds [-s, s-1] along axis of size s.
///
/// ## Returns
///
/// A new `Tensor<T>` .
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn scatter_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![3, 5].span(),
///         data: array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span(),
///     );
///     let updates = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(),
///         data: array![1, 2, 3, 4, 5, 6, 7, 8, 9].span(),
///     );
///     let indices = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(),
///         data: array![0, 1, 2, 2, 0, 1, 1, 0, 1].span(),
///     );
///
/// return tensor.scatter(
///         updates: updates,
/// indices: indices,
/// axis: Option::None(()),
/// reduction: Option::None(()),
/// );
/// }
/// >>> [[ 1, 8, 0, 0, 0],
/// [ 7, 2, 9, 0, 0],
/// [ 4, 0, 3, 0, 0]]
/// ```
///
fn scatter(
self: @Tensor<T>,
updates: Tensor<T>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<T>;
/// # tensor.trilu
///
/// ```rust
/// fn trilu(self: @Tensor<T>, upper: bool, k: i64) -> Tensor<T>;
/// ```
///
/// Returns a new tensor with the upper/lower triangular part of the tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `upper`(`bool`) - if true, returns the upper triangular part of the tensor, otherwise returns the lower part.
/// * `k`(`i64`) - value corresponding to the number of diagonals above or below the main diagonal to exclude or include.
///
/// ## Panics
///
/// * Panics if the dimension of the tensor is less than 2.
///
/// ## Returns
///
/// A `Tensor<T>` instance with the upper/lower triangular part of the tensor.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn trilu_tensor_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
///         shape: array![2, 3, 3].span(), data: array![0, 4, 3, 2, 0, 9, 8, 2, 5, 2, 7, 2, 2, 6, 0, 2, 6, 5].span(),
/// );
///
/// // We can call `trilu` function as follows.
/// return tensor.trilu(false, 0);
/// }
/// >>> [[[0, 0, 0],[2, 0, 0], [8, 2, 5]], [[2, 0, 0], [2, 6, 0], [2, 6, 5]]]
/// ```
///
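/// For comparison (same input, upper variant): `tensor.trilu(true, 0)` keeps the main diagonal and everything above it,
/// yielding [[[0, 4, 3], [0, 0, 9], [0, 0, 5]], [[2, 7, 2], [0, 6, 0], [0, 0, 5]]].
///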
fn trilu(self: @Tensor<T>, upper: bool, k: i64) -> Tensor<T>;
/// #tensor.bitwise_and
///
/// ```rust
/// fn bitwise_and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the bitwise AND of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<T>` with the same shape as the broadcasted inputs.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn and_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 0, 4, 5, 0, 6, 2].span(),
/// );
///
/// return tensor_1.bitwise_and(@tensor_2);
/// }
/// >>> [0,1,2,0,4,5,0,6,0]
/// ```
///
fn bitwise_and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// #tensor.bitwise_or
///
/// ```rust
/// fn bitwise_or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the bitwise OR of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<T>` with the same shape as the broadcasted inputs.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn or_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 0, 4, 5, 0, 6, 2].span(),
/// );
///
/// return tensor_1.bitwise_or(@tensor_2);
/// }
/// >>> [0,1,2,3,4,5,6,7,10]
/// ```
///
fn bitwise_or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// #tensor.bitwise_xor
///
/// ```rust
/// fn bitwise_xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the bitwise XOR of two tensors element-wise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<T>` with the same shape as the broadcasted inputs.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn xor_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 0, 4, 5, 0, 6, 2].span(),
/// );
///
/// return tensor_1.bitwise_xor(@tensor_2);
/// }
/// >>> [0,0,0,3,0,0,6,1,10]
/// ```
///
fn bitwise_xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ## tensor.reduce_l1
///
/// ```rust
/// fn reduce_l1(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the L1 norm of the input tensor's elements along the provided axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by summing the absolute values of its elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_l1_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reduce_l1` function as follows.
/// return tensor.reduce_l1(axis: 1, keepdims: false);
/// }
/// >>> [[2,4],[10,12]]
/// ```
///
fn reduce_l1(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.reduce_l2
///
/// ```rust
/// fn reduce_l2(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the L2 norm of the input tensor's elements along the provided axis.
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by the square root of the sum of the squares of its elements.
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn reduce_l2_example() -> Tensor<FP8x23> {
///
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(2);
/// shape.append(2);
/// let mut data = ArrayTrait::new();
/// data.append(FixedTrait::new_unscaled(1, false));
/// data.append(FixedTrait::new_unscaled(2, false));
/// data.append(FixedTrait::new_unscaled(3, false));
/// data.append(FixedTrait::new_unscaled(5, false));
/// let tensor = TensorTrait::<FP8x23>::new(shape.span(), data.span());
///
///     // We can call `reduce_l2` function as follows.
/// return tensor.reduce_l2(axis: 1, keepdims: true);
/// }
/// >>> [[0x11e3779, 0x2ea5ca1]]
/// ```
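/// The two outputs are the FP8x23 encodings of sqrt(1^2 + 2^2) ≈ 2.236 and sqrt(3^2 + 5^2) ≈ 5.831.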
///
fn reduce_l2(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.reduce_sum_square
///
/// ```rust
/// fn reduce_sum_square(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the sum of the squares of the input tensor's elements along the provided axis.
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by summing the squares of its elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_sum_square_example() -> Tensor<u32> {
///
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(2);
/// shape.append(2);
/// let mut data = ArrayTrait::new();
/// data.append(1);
/// data.append(2);
/// data.append(3);
/// data.append(4);
/// let tensor = TensorTrait::<u32>::new(shape.span(), data.span());
///
///     // We can call `reduce_sum_square` function as follows.
/// return tensor.reduce_sum_square(axis: 1, keepdims: true);
/// }
/// >>> [[5, 25]]
/// ```
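/// Each row is reduced to the sum of its squares: 1^2 + 2^2 = 5 and 3^2 + 4^2 = 25.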
///
fn reduce_sum_square(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// # tensor.constant_of_shape
///
/// ```rust
/// fn constant_of_shape(shape: Span<usize>, value: T) -> Tensor<T>;
/// ```
///
/// Returns a new tensor with the given shape and constant value.
///
/// ## Args
///
/// * `shape`(`Span<usize>`) - A span representing the shape of the tensor.
/// * `value` (`T`) - the constant value.
///
/// ## Returns
///
/// A new `Tensor<T>` instance.
///
/// ## Examples
///
/// Let's create a new u32 Tensor filled with the constant 42.
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{
/// TensorTrait, // we import the trait
/// Tensor, // we import the type
/// U32Tensor // we import the implementation.
/// };
///
/// fn constant_of_shape_example() -> Tensor<u32> {
/// let tensor = TensorTrait::constant_of_shape(shape: array![3].span(), value: 42);
///
/// return tensor;
/// }
///
/// >>> [42, 42, 42]
/// ```
///
fn constant_of_shape(shape: Span<usize>, value: T) -> Tensor<T>;
/// # tensor.gather_elements
///
/// ```rust
/// fn gather_elements(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
/// ```
///
/// GatherElements is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Tensor<i32>`) - Tensor of indices.
/// * `axis`(`Option<i32>`) - Axis to gather_elements on. Default: axis=0.
///
/// ## Panics
///
/// * Panics if index values are not within bounds [-s, s-1] along axis of size s.
///
/// ## Returns
///
/// A new `Tensor<T>` .
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn gather_elements_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![3, 3].span(),
///         data: array![1, 2, 3, 4, 5, 6, 7, 8, 9].span(),
///     );
///     let indices = TensorTrait::<i32>::new(
///         shape: array![2, 3].span(),
///         data: array![1, 2, 0, 2, 0, 0].span(),
///     );
///
/// return tensor.gather_elements(
/// indices: indices,
/// axis: Option::None(()),
/// );
/// }
/// >>> [[4 8 3]
///      [7 2 3]]
/// ```
///
fn gather_elements(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
/// # tensor.binarizer
///
/// ```rust
/// fn binarizer(self: @Tensor<T>, threshold: Option<T>) -> Tensor<T>
/// ```
///
/// Maps the values of a tensor element-wise to 0 or 1 based on the comparison against a threshold value.
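///
/// Values strictly greater than the threshold map to one; all other values map to zero. In the example below the
/// threshold has raw magnitude 1, so the inputs 0 and 1 map to 0 while 2 and 3 map to 8388608, the FP8x23 encoding of one.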
///
/// ## Args
/// * `self`(`@Tensor<T>`) - The input tensor to be binarized.
/// * `threshold`(`Option<T>`) - The threshold for the binarization operation.
///
/// ## Returns
/// A new `Tensor<T>` of the same shape as the input tensor with binarized values.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point numbers.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn binarizer_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new(0, false),
/// FixedTrait::new(1, false),
/// FixedTrait::new(2, false),
/// FixedTrait::new(3, false)
/// ]
/// .span(),
/// );
///     let threshold = Option::Some(FixedTrait::new(1, false));
///
///     return tensor.binarizer(threshold);
/// }
/// >>> [0, 0, 8388608, 8388608]
/// // The fixed point representation of
/// [0, 0, 1, 1]
/// ```
///
fn binarizer(self: @Tensor<T>, threshold: Option<T>) -> Tensor<T>;
/// # tensor.array_feature_extractor
///
/// ```rust
/// fn array_feature_extractor(self: @Tensor<T>, indices: Tensor<usize>) -> Tensor<T>;
/// ```
///
/// Selects elements of the input tensor based on the provided indices, applied to the last tensor axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Tensor<usize>`) - Tensor of indices.
///
/// ## Panics
///
/// * Panics if indices tensor is not 1-dimensional.
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with selected elements based on provided indices.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor, U32Tensor};
///
/// fn array_feature_extractor_example() -> Tensor<i32> {
///     let input_tensor = TensorTrait::<i32>::new(
/// shape: array![3, 4].span(),
/// data: array![
/// 0, 1, 2, 3,
/// 4, 5, 6, 7,
/// 8, 9, 10, 11
/// ].span(),
/// );
///
/// let indices = TensorTrait::<u32>::new(
/// shape: array![2].span(), data: array![1, 3].span(),
/// );
///
///     return input_tensor.array_feature_extractor(indices);
/// }
/// >>> [[1, 3]
/// [5, 7]
/// [9, 11]]
/// ```
///
fn array_feature_extractor(self: @Tensor<T>, indices: Tensor<usize>) -> Tensor<T>;
/// # tensor.shrink
///
/// ```rust
/// fn shrink(self: Tensor<T>, bias: Option<T>, lambd: Option<T>) -> Tensor<T>
/// ```
///
/// Shrinks the input tensor element-wise to the output tensor with the same datatype and shape based on the following formula:
/// If x < -lambd: y = x + bias; If x > lambd: y = x - bias; Otherwise: y = 0.
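///
/// For instance, with bias = 1 and lambd = 1 (as in the example below), an input of -2 maps to -2 + 1 = -1,
/// the inputs -1 and 1 fall inside [-lambd, lambd] and map to 0, and 2 maps to 2 - 1 = 1.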
///
/// ## Args
/// * `self`(`Tensor<T>`) - The input tensor to be shrunk.
/// * `bias`(`Option<T>`) - The bias value added to or subtracted from input tensor values.
/// * `lambd`(`Option<T>`) - The lambd value defining the shrink condition.
///
/// ## Returns
/// A new `Tensor<T>` of the same datatype and shape as the input tensor with shrinked values.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point numbers.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn shrink_example() -> Tensor<FP8x23> {
/// let tensor = TensorTrait::<FP8x23>::new(
/// shape: array![2, 2].span(),
/// data: array![
/// FixedTrait::new(2, true),
/// FixedTrait::new(1, true),
/// FixedTrait::new(1, false),
/// FixedTrait::new(2, false)
/// ]
/// .span(),
/// );
///     let bias = Option::Some(FixedTrait::new(1, false));
///     let lambd = Option::Some(FixedTrait::new(1, false));
///
///     return tensor.shrink(bias, lambd);
/// }
/// >>> [-8388608, 0, 0, 8388608]
/// // The fixed point representation of
/// [-1, 0, 0, 1]
/// ```
///
fn shrink(self: Tensor<T>, bias: Option<T>, lambd: Option<T>) -> Tensor<T>;
/// ## tensor.reduce_mean
///
/// ```rust
/// fn reduce_mean(self: @Tensor<T>, axes: Option<Span<usize>>, keepdims: Option<bool>, noop_with_empty_axes: Option<bool>) -> Tensor<T>;
/// ```
///
/// Computes the mean of the input tensor's elements along the provided axes.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axes`(`Option<Span<usize>>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true.
/// * `keepdims`(`Option<bool>`) - Keep the reduced dimension or not, default true means keep reduced dimension.
/// * `noop_with_empty_axes`(`Option<bool>`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor is equivalent to the input tensor.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axes reduced by meaning its elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_mean_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reduce_mean` function as follows.
///     return tensor.reduce_mean(axes: Option::Some(array![1].span()),
///         keepdims: Option::None(()),
///         noop_with_empty_axes: Option::None(()));
/// }
/// >>> [[1,2],[5,6]]
/// ```
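/// Each output entry is the mean over axis 1: (0 + 2) / 2 = 1, (1 + 3) / 2 = 2, (4 + 6) / 2 = 5 and (5 + 7) / 2 = 6.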
///
fn reduce_mean(
self: @Tensor<T>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
/// ## tensor.reduce_min
///
/// ```rust
/// fn reduce_min(self: @Tensor<T>, axes: Option<Span<usize>>, keepdims: Option<bool>, noop_with_empty_axes: Option<bool>) -> Tensor<T>;
/// ```
///
/// Computes the min of the input tensor's elements along the provided axes.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axes`(`Option<Span<usize>>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true.
/// * `keepdims`(`Option<bool>`) - Keep the reduced dimension or not, default true means keep reduced dimension.
/// * `noop_with_empty_axes`(`Option<bool>`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor is equivalent to the input tensor.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axes reduced by minimum of its elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_min_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reduce_min` function as follows.
///     return tensor.reduce_min(axes: Option::Some(array![1].span()),
///         keepdims: Option::None(()),
///         noop_with_empty_axes: Option::None(()));
/// }
/// >>> [[0,1],[4,5]]
/// ```
///
fn reduce_min(
self: @Tensor<T>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
/// #tensor.pow
///
/// ```rust
/// fn pow(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Pow takes input data (Tensor) and an exponent Tensor, and produces one output data (Tensor) where the function f(x) = x^exponent is applied to the data tensor elementwise.
/// The input tensors must have either:
/// * Exactly the same shape
/// * The same number of dimensions and the length of each dimension is either a common length or 1.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor, base of the exponent.
/// * `other`(`@Tensor<T>`) - The second tensor, power of the exponent.
///
/// ## Panics
///
/// * Panics if the shapes are not equal or broadcastable
///
/// ## Returns
///
/// A new `Tensor<T>` with the same shape as the broadcasted inputs.
///
/// ## Examples
///
/// Case 1: Tensors with the same shape
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn pow_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 0, 1, 2, 0, 1, 2].span(),
/// );
///
/// return tensor_1.pow(@tensor_2);
/// }
/// >>> [1,1,4,1,4,25,1,7,64]
/// ```
///
/// Case 2: Tensors with different shapes (broadcasting)
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn pow_example() -> Tensor<usize> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![1, 3].span(), data: array![0, 1, 2].span(),
/// );
///
/// return tensor_1.pow(@tensor_2);
/// }
/// >>> [1,1,4,1,4,25,1,7,64]
/// ```
///
fn pow(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
/// ## tensor.reduce_prod
///
/// ```rust
/// fn reduce_prod(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Reduces a tensor by multiplying its elements along a specified axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by multiplying its elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_prod_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
/// );
///
/// // We can call `reduce_prod` function as follows.
/// return tensor.reduce_prod(axis: 0, keepdims: false);
/// }
/// >>> [[0,5],[12,21]]
/// ```
///
fn reduce_prod(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.is_inf
///
/// ```rust
/// fn is_inf(self: @Tensor<T>, detect_negative: Option<u8>, detect_positive: Option<u8>) -> Tensor<bool>;
/// ```
///
/// Maps infinity to true and other values to false.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `detect_negative`(`Option<u8>`) - (Optional) Whether to map negative infinity to true. Defaults to 1, so that negative infinity induces true.
/// * `detect_positive`(`Option<u8>`) - (Optional) Whether to map positive infinity to true. Defaults to 1, so that positive infinity induces true.
///
///
/// ## Returns
///
/// A new `Tensor<bool>` instance with entries set to true iff the input tensor's corresponding element was infinity.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::numbers::NumberTrait;
/// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor};
///
/// fn is_inf_example() -> Tensor<bool> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![6].span(), data: array![1, 0, NumberTrait::INF(), 8, NumberTrait::INF(), NumberTrait::INF()].span(),
/// );
///
/// return tensor.is_inf(detect_negative: Option::None, detect_positive: Option::None);
/// }
/// >>> [false, false, true, false, true, true]
/// ```
///
fn is_inf(
self: @Tensor<T>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool>;
/// ## tensor.is_nan
///
/// ```rust
/// fn is_nan(self: @Tensor<T>) -> Tensor<bool>;
/// ```
///
/// Maps NaN to true and other values to false.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// A new `Tensor<bool>` instance with entries set to true iff the input tensor's corresponding element was NaN.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor};
/// use orion::numbers::{FixedTrait, FP8x23};
///
/// fn is_nan_example() -> Tensor<bool> {
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(4);
///
/// let mut data = ArrayTrait::new();
/// data.append(FP8x23 { mag: 10066329, sign: true });
/// data.append(FP8x23 { mag: 0, sign: false });
/// data.append(FixedTrait::NaN());
/// data.append(FP8x23 { mag: 23488102, sign: false });
///     let tensor = TensorTrait::new(shape.span(), data.span());
///
/// return tensor.is_nan();
/// }
/// >>> [false, false, true, false]
/// ```
///
fn is_nan(self: @Tensor<T>) -> Tensor<bool>;
/// #tensor.not
///
/// ```rust
/// fn not(self: @Tensor<bool>) -> Tensor<bool>;
/// ```
///
/// Computes the negation of the elements in the bool type input tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<bool>`) - The input tensor.
///
///
/// ## Returns
///
/// A new `Tensor<bool>` of the same shape as the input tensor with
/// the negation of all elements in the input tensor.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor};
///
/// fn not_example() -> Tensor<bool> {
/// let tensor = TensorTrait::new(
/// shape: array![3].span(),
/// data: array![
/// true, true, false
/// ]
/// .span(),
/// );
///
/// return tensor.not();
/// }
/// >>> [false, false, true]
/// ```
///
fn not(self: @Tensor<T>) -> Tensor<T>;
/// ## tensor.reduce_log_sum
///
/// ```rust
/// fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the log of the sum of the input tensor's elements along the provided axis.
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by the natural logarithm of the sum of its elements.
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn reduce_log_sum() -> Tensor<FP16x16> {
///
/// let mut sizes = ArrayTrait::new();
/// sizes.append(2);
/// sizes.append(2);
/// sizes.append(2);
///
/// let mut data = ArrayTrait::new();
/// data.append(FixedTrait::new_unscaled(1, false));
/// data.append(FixedTrait::new_unscaled(2, false));
/// data.append(FixedTrait::new_unscaled(3, false));
/// data.append(FixedTrait::new_unscaled(4, false));
/// data.append(FixedTrait::new_unscaled(5, false));
/// data.append(FixedTrait::new_unscaled(6, false));
/// data.append(FixedTrait::new_unscaled(7, false));
/// data.append(FixedTrait::new_unscaled(8, false));
///
/// let tensor = TensorTrait::<FP16x16>::new(sizes.span(), data.span());
///
///     // We can call `reduce_log_sum` function as follows.
/// return tensor.reduce_log_sum(axis: 2, keepdims: false);
/// }
/// >>> [[0x11938, 0x1f203], [0x265d9, 0x2b540]]
/// ```
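/// The four outputs are the fixed-point encodings of ln(1 + 2), ln(3 + 4), ln(5 + 6) and ln(7 + 8) respectively.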
///
fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.reduce_log_sum_exp
///
/// ```rust
/// fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the log of the sum of the exponentials of the input tensor's elements along the provided axis.
///
/// ## Args
/// * 'self'(`@Tensor<T>`) - The input tensor.
/// * 'axis'(`usize`) - The dimension to reduce.
/// * 'keepdims'(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// Returns a new `Tensor<T>` instance with the specified axis reduced by the logarithm of the sum of the exponentials of its elements.
///
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::operators::tensor::FP32x32Tensor;
/// use orion::numbers::{FixedTrait, FP32x32};
///
/// fn reduce_log_sum_exp() -> Tensor<FP32x32> {
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(3);
/// shape.append(2);
/// shape.append(2);
///
/// let mut data = ArrayTrait::new();
/// data.append(FP32x32 { mag: 4294967296, sign: false });
/// data.append(FP32x32 { mag: 8589934592, sign: false });
/// data.append(FP32x32 { mag: 12884901888, sign: false });
/// data.append(FP32x32 { mag: 17179869184, sign: false });
/// data.append(FP32x32 { mag: 21474836480, sign: false });
/// data.append(FP32x32 { mag: 25769803776, sign: false });
/// data.append(FP32x32 { mag: 30064771072, sign: false });
/// data.append(FP32x32 { mag: 34359738368, sign: false });
/// data.append(FP32x32 { mag: 38654705664, sign: false });
/// data.append(FP32x32 { mag: 42949672960, sign: false });
/// data.append(FP32x32 { mag: 47244640256, sign: false });
/// data.append(FP32x32 { mag: 51539607552, sign: false });
///
/// let tensor = TensorTrait::<FP32x32>::new(shape.span(), data.span());
///
/// return tensor.reduce_log_sum_exp(axis: 2, keepdims: false);
///
/// }
///
///
/// >>> [[9215828, 16323477], [20115004, 22716772], [24699744, 26302432]]
/// ```
///
fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.erf
///
/// ```rust
/// fn erf(self: @Tensor<T>) -> Tensor<T>;
/// ```
///
/// Computes the error function of the input tensor element-wise.
///
/// ## Returns
///
/// A new `Tensor<T>` of the same shape as the input tensor with
/// the error function of the input tensor computed element-wise.
///
/// ## Type Constraints
///
/// Constrain input and output types to fixed point tensors.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn erf_example() -> Tensor<FP16x16> {
/// // The erf inputs is [1.0, 0.134, 0.520, 2.0, 3.5, 5.164]
/// let tensor = TensorTrait::<FP16x16>::new(
/// shape: array![6].span(),
/// data: array![
///             FixedTrait::new(65536, false),
///             FixedTrait::new(8832, false),
///             FixedTrait::new(34079, false),
///             FixedTrait::new(131072, false),
///             FixedTrait::new(229376, false),
///             FixedTrait::new(338428, false),
/// ]
/// .span(),
/// );
///
/// return tensor.erf();
/// }
/// >>> [55227,9560,35252,65229,65536,65536]
/// ```
///
fn erf(self: @Tensor<T>) -> Tensor<T>;
/// # tensor.unique
///
/// ```rust
/// fn unique(self: @Tensor<T>, axis: Option<usize>, sorted: Option<bool>) -> (Tensor<T>, Tensor<i32>, Tensor<i32>, Tensor<i32>);
/// ```
///
/// Identifies the unique elements or subtensors of a tensor, with an optional axis parameter for subtensor slicing.
/// This function returns a tuple containing the tensor of unique elements or subtensors, and optionally,
/// tensors for indices, inverse indices, and counts of unique elements.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`Option<usize>`) - Specifies the dimension along which to find unique subtensors. A None value means the unique
/// elements of the tensor will be returned in a flattened form. A negative value indicates
/// dimension counting from the end.
/// * `sorted`(`Option<bool>`) - Determines if the unique elements should be returned in ascending order. Defaults to true.
///
/// ## Returns
///
/// A tuple containing:
/// * A Tensor<T> with unique values or subtensors from self.
/// * A Tensor<i32> with the first occurrence indices of unique elements in self. If axis is given, it returns indices
/// along that axis; otherwise, it refers to the flattened tensor.
/// * A Tensor<i32> mapping each element of self to its index in the unique tensor. If axis is specified, it maps to
/// the subtensor index; otherwise, it maps to the unique flattened tensor.
/// * A Tensor<i32> for the counts of each unique element or subtensor in self.
///
///
/// ## Example
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn unique_flat_example() -> (Tensor<u32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![1, 6].span(),
/// data: array![2, 1, 1, 3, 4, 3].span(),
/// );
///
/// return tensor.unique(
/// axis: Option::None(()),
/// sorted: Option::Some(false)
/// );
/// }
/// >>> (
/// [2, 1, 3, 4],
/// [0, 1, 3, 4],
/// [0, 1, 1, 2, 3, 2],
/// [1, 2, 2, 1]
/// )
/// ```
///
/// or
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn unique_axis_example() -> (Tensor<u32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(),
/// data: array![1, 0, 0,
/// 1, 0, 0,
/// 2, 3, 4].span(),
/// );
///
/// return tensor.unique(
/// axis: Option::Some(0),
/// sorted: Option::Some(true)
/// );
/// }
/// >>> (
/// [[ 1, 0, 0],
/// [ 2, 3, 4]],
/// [0, 2],
/// [0, 0, 1],
/// [2, 1]
/// )
/// ```
///
fn unique(
self: @Tensor<T>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<T>, Tensor<i32>, Tensor<i32>, Tensor<i32>);
/// # tensor.gather_nd
///
/// ```rust
/// fn gather_nd(self: @Tensor<T>, indices: Tensor<usize>, batch_dims: Option<usize>) -> Tensor<T>;
/// ```
///
/// Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `indices`(`Tensor<usize>`) - Tensor of indices.
/// * `batch_dims`(`Option<usize>`) - The number of batch dimensions. Gathering starts from the dimensions of data[batch_dims:].
///
/// ## Panics
///
/// * Panics if index values are not within bounds [-s, s-1] along axis of size s.
/// * Panics if indices_shape[-1] > r-b.
/// * Panics if first b dimensions of the shape of indices tensor and data tensor are not equal.
///
/// ## Returns
/// A new `Tensor<T>`.
///
/// ## Example
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn gather_nd_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![2, 2].span(),
/// data: array![0, 1, 2, 3].span(),
/// );
/// let indices = TensorTrait::<u32>::new(
/// shape: array![4, 1].span(),
/// data: array![0, 0, 1, 1].span(),
/// );
///
/// return tensor.gather_nd(
/// indices: indices,
/// batch_dims: Option::Some(0),
/// );
/// }
/// >>> [[0, 1],
/// [0, 1],
/// [2, 3],
/// [2, 3]]
/// ```
///
fn gather_nd(self: @Tensor<T>, indices: Tensor<usize>, batch_dims: Option<usize>) -> Tensor<T>;
/// # tensor.compress
///
/// ```rust
/// fn compress(self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<T>;
/// ```
///
/// Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. In case axis is not provided, input is flattened before elements are selected.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `condition`(`Tensor<usize>`) - Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.
/// * `axis`(`Option<usize>`) - (Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).
///
/// ## Panics
///
/// * Panics if condition rank is not equal to 1.
///
/// ## Returns
///
/// A new `Tensor<T>` .
///
/// ## Example
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn compress_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![3, 2].span(),
/// data: array![[1, 2], [3, 4], [5, 6]].span(),
/// );
/// let condition = TensorTrait::<u32>::new(
/// shape: array![3].span(),
/// data: array![0, 1, 1].span(),
/// );
///
/// return tensor.compress(
/// condition: condition,
/// axis: Option::Some((0)),
/// );
/// }
/// >>> [[3, 4],
/// [5, 6]]
/// ```
///
fn compress(self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<T>;
/// # tensor.layer_normalization
///
/// ```rust
/// fn layer_normalization(
/// self: @Tensor<T>,
/// scale: @Tensor<T>,
/// B: Option<@Tensor<T>>,
/// axis: Option<i32>,
/// epsilon: Option<T>,
/// stash_type: Option<usize>,
/// ) -> (Tensor<T>, Tensor<T>, Tensor<T>);
/// ```
///
/// Layer normalization of the input, in two stages.
/// The first stage is standardization, which makes the normalized elements have zero mean and unit variances.
/// The second stage then scales and shifts the outcome of the first stage.
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `scale`(`@Tensor<T>,`) - Scale tensor.
/// * `B`(`Option<@Tensor<T>>`) - Bias tensor.
/// * `axis`(`Option<i32>`) (default is -1) - The first normalization dimension. If rank(X) is r, axis' allowed range is [-r, r). Negative value means counting dimensions from the back.
/// * `epsilon`(`Option<T>`) (default is 0) - The epsilon value to use to avoid division by zero.
/// * `stash_type`(`Option<usize>`) - Specifies the computation precision. Unused here: the precision is determined by the tensor's element type.
/// ## Panics
///
/// * Panics if 'axis' is not in the accepted range [-r, r) where r = rank(input).
///
/// ## Returns
///
/// A new normalized tensor`Tensor<T>`.
/// A tensor containing the mean `Tensor<T>`.
/// A tensor containing the inverse standard deviation `Tensor<T>`.
///
/// ## Example
///
/// ```rust
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::operators::tensor::FP16x16TensorPartialEq;
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::FP16x16Tensor;
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn layer_normalization_example() -> (Tensor<FP16x16>, Tensor<FP16x16>, Tensor<FP16x16>) {
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(3);
/// shape.append(4);
///
/// let mut data = ArrayTrait::new();
/// data.append(FP16x16 { mag: 41143, sign: true });
/// data.append(FP16x16 { mag: 51803, sign: false });
/// data.append(FP16x16 { mag: 113556, sign: false });
/// data.append(FP16x16 { mag: 64774, sign: false });
/// data.append(FP16x16 { mag: 866, sign: false });
/// data.append(FP16x16 { mag: 698, sign: true });
/// data.append(FP16x16 { mag: 106500, sign: false });
/// data.append(FP16x16 { mag: 98929, sign: false });
/// data.append(FP16x16 { mag: 7551, sign: false });
/// data.append(FP16x16 { mag: 30689, sign: true });
/// data.append(FP16x16 { mag: 38325, sign: false });
/// data.append(FP16x16 { mag: 48164, sign: false });
/// let X = TensorTrait::new(shape.span(), data.span());
///
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(4);
/// let mut data = ArrayTrait::new();
/// data.append(FP16x16 { mag: 49855, sign: false });
/// data.append(FP16x16 { mag: 150787, sign: false });
/// data.append(FP16x16 { mag: 83498, sign: true });
/// data.append(FP16x16 { mag: 30346, sign: false });
/// let scale = TensorTrait::new(shape.span(), data.span());
///
///
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(4);
/// let mut data = ArrayTrait::new();
/// data.append(FP16x16 { mag: 54864, sign: true });
/// data.append(FP16x16 { mag: 50952, sign: false });
/// data.append(FP16x16 { mag: 8870, sign: true });
/// data.append(FP16x16 { mag: 23216, sign: true });
/// let bias = TensorTrait::new(shape.span(), data.span());
///
/// return X.layer_normalization(@scale,Option::Some(@bias),Option::None,Option::None,Option::None);
/// }
/// >>> [[-0.48926553 1.0185822 -0.02138367 -0.39223218]
/// [-0.7945549 0.99696046 0.04332176 -0.412645 ]
/// [-0.5664707 0.7491956 -0.7896356 -0.5320859 ]]
///
/// ```
///
fn layer_normalization(
self: @Tensor<T>,
scale: @Tensor<T>,
B: Option<@Tensor<T>>,
axis: Option<i32>,
epsilon: Option<T>,
stash_type: Option<usize>,
) -> (Tensor<T>, Tensor<T>, Tensor<T>);
/// # tensor.split
///
/// ```rust
/// fn split(self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, split: Option<Tensor<usize>>
/// ) -> Array<Tensor<T>>;
/// ```
/// Split a tensor into a list of tensors, along the specified ‘axis’.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The axis to split along.
/// * `num_outputs`(`Option<usize>`) - Number of outputs to split parts of the tensor into.
/// * `split`(`Option<Tensor<usize>>`) - Optional length of each output.
///
/// ## Panics
///
/// * Panics if 'axis' is not in the accepted range [-r, r-1] where r = rank(input).
/// * Panics if any 'split' value is negative, or if the sum of the 'split' values does not equal the dimension size at the specified 'axis'.
/// * Panics if both, or neither, of 'split' and 'num_outputs' are specified.
///
/// ## Returns
///
/// One or more outputs forming list of tensors after splitting.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// use core::option::OptionTrait;
/// fn split_tensor_example() -> Array<Tensor<u32>> {
/// let tensor: Tensor<u32> = TensorTrait::<u32>::new(
/// shape: array![2,4].span(),
/// data: array![
/// 0, 1, 2, 3, 4, 5, 6, 7
/// ].span(),
/// );
/// let num_outputs = Option::Some(2);
/// // split = Option::Some(array![1, 1].span());
/// let split_num: Option<Tensor<usize>> = Option::None(());
/// // We can call `split` function as follows.
/// return tensor.split(1, num_outputs, split_num);
/// }
/// >>> [[0,1],[4,5]]
/// [[2,3],[6,7]]
/// ```
///
fn split(
self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, split: Option<Tensor<usize>>
) -> Array<Tensor<T>>;
/// # tensor.reverse_sequence
///
/// ```rust
/// fn reverse_sequence(self: @Tensor<T>, sequence_lens: Tensor<usize>, batch_axis: Option<usize>, time_axis: Option<usize>) ->
/// Tensor<T>;
/// ```
///
/// Reverse batch of sequences having different lengths specified by sequence_lens.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of rank r >= 2.
/// * `sequence_lens`(`Tensor<usize>`) - Tensor specifying lengths of the sequences in a batch. It has shape [batch_size].
/// * `batch_axis`(`Option<usize>`) - (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0.
/// * `time_axis`(`Option<usize>`) - (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1.
///
/// ## Panics
///
/// * Panics if the 'batch_axis' == 'time_axis'.
/// * Panics if the 'batch_axis' and 'time_axis' are not 0 and 1.
/// * Panics if a 'sequence_lens' value exceeds the valid sequence range.
///
/// ## Returns
///
/// Tensor with same shape of input.
///
/// ## Example
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// use core::option::OptionTrait;
/// fn reverse_sequence_example() -> Tensor<u32> {
/// let tensor: Tensor<u32> = TensorTrait::<u32>::new(
/// shape: array![4,4].span(),
/// data: array![
/// 0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16
/// ].span(),
/// );
/// let sequence_lens = TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span());
/// let batch_axis = Option::Some(0);
/// let time_axis = Option::Some(1);
/// // We can call `reverse_sequence` function as follows.
/// return tensor.reverse_sequence(sequence_lens, batch_axis, time_axis);
/// }
/// >>> [
/// [0,1,2,3],
/// [5,4,6,7],
/// [10,9,8,11],
/// [15,14,13,12]
/// ]
/// ```
///
fn reverse_sequence(
self: @Tensor<T>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<T>;
/// # tensor.scatter_nd
///
/// ```rust
/// fn scatter_nd(self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, reduction: Option<usize>) -> Tensor<T>;
/// ```
///
/// Produces a copy of input data, and updates value to values specified by updates at specific index positions specified by indices.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `updates`(`Tensor<T>`) - The updates tensor.
/// * `indices`(`Tensor<usize>`) - Tensor of indices.
/// * `reduction`(`Option<usize>`) - Reduction operation. Default: reduction='none'.
///
/// ## Panics
///
/// * Panics if index values are not within bounds [-s, s-1] along axis of size s.
/// * Panics if indices last axis is greater than data rank.
///
/// ## Returns
///
/// A new `Tensor<T>` .
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn scatter_nd_example() -> Tensor<u32> {
/// let tensor = TensorTrait::<u32>::new(
/// shape: array![4, 4, 4].span(),
/// data: array![1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6,
/// 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4,
/// 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8].span()
/// );
///
/// let updates = TensorTrait::<u32>::new(
/// shape: array![2, 4, 4].span(),
/// data: array![5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 1, 1, 1, 1, 2, 2,
/// 2, 2, 3, 3, 3, 3, 4, 4, 4, 4].span(),
/// );
///
/// let indices = TensorTrait::<u32>::new(
/// shape: array![2, 1].span(),
/// data: array![0, 2].span(),
/// );
///
/// return tensor.scatter_nd(
/// updates: updates,
/// indices: indices,
/// reduction: Option::Some('add'),
/// );
/// }
/// >>> [[[ 6,  7,  8,  9],
///       [11, 12, 13, 14],
///       [15, 14, 13, 12],
///       [12, 11, 10,  9]],
///
///      [[ 1,  2,  3,  4],
///       [ 5,  6,  7,  8],
///       [ 8,  7,  6,  5],
///       [ 4,  3,  2,  1]],
///
///      [[ 9,  8,  7,  6],
///       [ 6,  5,  4,  3],
///       [ 4,  5,  6,  7],
///       [ 9, 10, 11, 12]],
///
///      [[ 8,  7,  6,  5],
///       [ 4,  3,  2,  1],
///       [ 1,  2,  3,  4],
///       [ 5,  6,  7,  8]]]
/// ```
///
fn scatter_nd(
self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<T>;
/// # tensor.dynamic_quantize_linear
///
/// ```rust
/// fn dynamic_quantize_linear(self: @Tensor<T>) -> (Tensor<u32>, Tensor<T>, Tensor<T>);
/// ```
///
/// Quantizes a Tensor using dynamic linear quantization.
///
/// The dynamic linear quantization operator. It consumes a high precision tensor
/// to compute the low precision / quantized tensor dynamically.
/// Right now only uint8 is supported; it saturates to [0, 255].
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// A new `Tensor<u32>` with the same shape as the input tensor, containing the quantized values.
/// * `y_scale`(`@Tensor<T>`) - Scale for doing quantization to get `y`.
/// * `y_zero_point`(`@Tensor<T>`) - Zero point for doing quantization to get `y`.
///
/// ## Type Constraints
///
/// * `T` in (`Tensor<FP>`, `Tensor<i8>`, `Tensor<i32>`, `Tensor<u32>`)
/// * `Q` in (`Tensor<u32>`) - Constrain `y` to 8-bit unsigned integer values (stored as u32, saturating to [0, 255]).
///
/// ## Examples
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, FP16x16Tensor};
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn dynamic_quantize_linear_example() -> (Tensor<u32>, Tensor<FP16x16>, Tensor<FP16x16>) {
/// // We instantiate a 1D Tensor here.
/// let x = TensorTrait::<FP16x16>::new(
/// shape: array![6].span(),
/// data: array![
/// FP16x16 { mag: 10945, sign: false },
/// FP16x16 { mag: 190054, sign: false },
/// FP16x16 { mag: 196608, sign: false },
/// FP16x16 { mag: 229376, sign: false },
/// FP16x16 { mag: 196608, sign: true },
/// FP16x16 { mag: 229376, sign: true },
/// ]
/// .span(),
/// );
///
/// return x.dynamic_quantize_linear();
/// }
/// >>> ([133, 233, 236, 255, 18, 0], [0.02745], [128])
/// ```
///
fn dynamic_quantize_linear(self: @Tensor<T>) -> (Tensor<u32>, Tensor<T>, Tensor<T>);
/// # tensor.optional
///
/// ```rust
/// fn optional(self: @Tensor<T>) -> Option<Tensor<T>>;
/// ```
///
/// Constructs an optional-type value containing either an empty optional of a certain
/// type specified by the attribute, or a non-empty value containing the input element.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
///
/// ## Returns
///
/// The optional output enclosing the input element.
///
/// ## Examples
///
/// ```rust
/// use core::option::OptionTrait;
/// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor};
///
/// fn optional_example() -> Option<Tensor<i8>> {
/// let a = TensorTrait::<i8>::new(
/// shape: array![4, 2].span(),
/// data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(),
/// );
/// a.optional()
/// }
/// >>> Option[Tensor[1,2,3,4,5,6,7,8]]
///
/// ```
///
fn optional(self: @Tensor<T>) -> Option<Tensor<T>>;
/// # tensor.split_to_sequence
///
/// ```rust
/// fn split_to_sequence(
/// self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
/// ) -> Array<Tensor<T>>;
/// ```
///
/// Split a tensor into a sequence of tensors, along the specified ‘axis’
///
///
/// ## Args
/// * `self`(`@Tensor<T>`) - The input tensor to split.
/// * `axis`(`usize`) - The axis to split along.
/// * `keepdims`(`usize`) - Keep the split dimension or not. If input 'split' is specified, this attribute is ignored.
/// * `split`(`Option<Tensor<usize>>`) - Length of each output. It can be either a scalar (tensor of empty shape), or a 1-D tensor. All values must be >= 0.
///
/// ## Panics
///
/// * Panics if 'axis' is not in the accepted range [-r, r-1] where r = rank(input).
/// * Panics if the 'split' is not either a scalar (tensor of empty shape), or a 1-D tensor.
///
/// ## Returns
///
/// One or more outputs forming a sequence of tensors after splitting.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// use core::option::OptionTrait;
/// fn split_to_sequence_example() -> Array<Tensor<u32>> {
/// let tensor: Tensor<u32> = TensorTrait::<u32>::new(
/// shape: array![2,4].span(),
/// data: array![
/// 0, 1, 2, 3, 4, 5, 6, 7
/// ].span(),
/// );
/// // let split = Option::Some(TensorTrait::new(array![1].span(), array![2].span()));
/// let split: Option<Tensor<usize>> = Option::Some(TensorTrait::new(array![2].span(), array![2, 2].span()));
/// // We can call `split_to_sequence` function as follows.
/// return tensor.split_to_sequence(1, 1, split);
/// }
/// >>> [
/// [[0,1],[4,5]],
/// [[2,3],[6,7]]
/// ]
/// ```
///
fn split_to_sequence(
self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<T>>;
/// # tensor.range
///
/// ```rust
/// fn range(start: T, end: T, step: T) -> Tensor<T>;
/// ```
///
/// Generates a tensor containing a sequence of numbers that begins at `start` and extends by increments of `step` up to `end` (exclusive).
///
/// ## Args
///
/// * `start`(`T`) - First entry for the range of output values.
/// * `end`(`T`) - Exclusive upper limit for the range of output values.
/// * `step `(`T`) - Value to step by.
///
/// ## Returns
///
/// A 1-D tensor with same type as the inputs containing generated range of values.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::I32TensorPartialEq;
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::operators::tensor::{I32Tensor, I32TensorAdd};
/// use orion::utils::{assert_eq, assert_seq_eq};
/// use orion::numbers::NumberTrait;
///
///
/// fn range_example() -> Tensor<i32> {
/// return TensorTrait::range(21,2,-3);
/// }
/// >>> [21 18 15 12 9 6 3]
/// ```
///
fn range(start: T, end: T, step: T) -> Tensor<T>;
/// # tensor.hann_window
///
/// ```rust
/// fn hann_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// ```
///
/// Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106.
///
/// ## Args
///
/// * `size`(`T`) - A scalar value indicating the length of the window.
/// * `periodic`(`Option<usize>`) - If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is 1, the window of length size + 1 is computed and the first size points are returned. The default value is 1.
///
/// ## Returns
///
/// A Hann window with length: size. The output has the shape: [size].
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::FP8x23TensorPartialEq;
/// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::utils::{assert_eq, assert_seq_eq};
/// use orion::numbers::{FixedTrait, FP8x23};
///
///
/// fn hann_window_example() -> Tensor<FP8x23> {
/// return TensorTrait::hann_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4
/// }
/// >>> [0 6291455 6291456 0]
/// ```
///
fn hann_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// # tensor.hamming_window
///
/// ```rust
/// fn hamming_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// ```
///
/// Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106.
///
/// ## Args
///
/// * `size`(`T`) - A scalar value indicating the length of the window.
/// * `periodic`(`Option<usize>`) - If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is 1, the window of length size + 1 is computed and the first size points are returned. The default value is 1.
///
/// ## Returns
///
/// A Hamming window with length: size. The output has the shape: [size].
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::FP8x23TensorPartialEq;
/// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::utils::{assert_eq, assert_seq_eq};
/// use orion::numbers::{FixedTrait, FP8x23};
///
///
/// fn hamming_window_example() -> Tensor<FP8x23> {
/// return TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4
/// }
/// >>> [729444 6473817 6473817 729444]
/// ```
///
fn hamming_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// # tensor.blackman_window
///
/// ```rust
/// fn blackman_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// ```
///
/// Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106.
///
/// ## Args
///
/// * `size`(`T`) - A scalar value indicating the length of the window.
/// * `periodic`(`Option<usize>`) - If 1, returns a window to be used as a periodic function. If 0, returns a symmetric window. When 'periodic' is 1, the window of length size + 1 is computed and the first size points are returned. The default value is 1.
///
/// ## Returns
///
/// A Blackman window with length: size. The output has the shape: [size].
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::FP8x23TensorPartialEq;
/// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::utils::{assert_eq, assert_seq_eq};
/// use orion::numbers::{FixedTrait, FP8x23};
///
///
/// fn blackman_window_example() -> Tensor<FP8x23> {
/// return TensorTrait::blackman_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4
/// }
/// >>> [0 0.36 0.36 0]
/// ```
///
fn blackman_window(size: T, periodic: Option<usize>) -> Tensor<T>;
/// # TensorTrait::random_uniform_like
///
/// ```rust
/// fn random_uniform_like(tensor: @Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>) -> Tensor<T>;
/// ```
///
/// RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor.
///
/// This operation creates a new tensor with the same shape as the input tensor, where each element is initialized with a random value sampled from a uniform distribution.
///
/// ## Args
///
/// * `tensor`(`@Tensor<T>`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.
/// * `high`(Option<T>) - An optional parameter specifying the upper bound (exclusive) of the uniform distribution. If not provided, defaults to 1.0.
/// * `low`(Option<T>) - An optional parameter specifying the lower bound (inclusive) of the uniform distribution. If not provided, defaults to 0.0.
/// * `seed`(Option<usize>) - An optional parameter specifying the seed for the random number generator. If not provided, a random seed will be used.
///
/// ## Returns
///
/// * A `Tensor<T>` with the same shape as the input tensor, filled with random values from a uniform distribution within the specified range.
///
/// ## Examples
///
/// ```rust
/// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor};
/// use orion::utils::{assert_eq, assert_seq_eq};
/// use orion::operators::tensor::FP8x23TensorPartialEq;
/// use orion::numbers::{FixedTrait, FP8x23};
///
///
/// fn example() -> Tensor<FP8x23> {
/// let mut shape = ArrayTrait::<usize>::new();
/// shape.append(1);
/// shape.append(8);
/// shape.append(1);
/// shape.append(2);
///
/// let mut data = ArrayTrait::new();
/// data.append(FP8x23 { mag: 70016, sign: true });
/// data.append(FP8x23 { mag: 57536, sign: false });
/// data.append(FP8x23 { mag: 116032, sign: false });
/// data.append(FP8x23 { mag: 162944, sign: true });
/// data.append(FP8x23 { mag: 43360, sign: false });
/// data.append(FP8x23 { mag: 128960, sign: false });
/// data.append(FP8x23 { mag: 151808, sign: true });
/// data.append(FP8x23 { mag: 28368, sign: false });
/// data.append(FP8x23 { mag: 21024, sign: false });
/// data.append(FP8x23 { mag: 24992, sign: false });
/// data.append(FP8x23 { mag: 125120, sign: true });
/// data.append(FP8x23 { mag: 79168, sign: true });
/// data.append(FP8x23 { mag: 136960, sign: true });
/// data.append(FP8x23 { mag: 10104, sign: true });
/// data.append(FP8x23 { mag: 136704, sign: false });
/// data.append(FP8x23 { mag: 184960, sign: true });
/// let tensor = TensorTrait::new(shape.span(), data.span());
/// return TensorTrait::random_uniform_like(@tensor, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { mag: 8388608, sign: false }), Option::Some(354145));
/// }
/// >>> [[[[7299130, 4884492]], [[2339070, 1559536]], [[3448557, 984617]], [[5745934, 3670947]], [[4665989, 3079292]], [[3375288, 948254]], [[3749966, 4911069]], [[1358829, 4368105]]]]
/// ```
///
fn random_uniform_like(
tensor: @Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>
) -> Tensor<T>;
/// # tensor.label_encoder
///
/// ```rust
/// fn label_encoder(self: @Tensor<T>, default_list: Option<Span<T>>, default_tensor: Option<Tensor<T>>, keys: Option<Span<T>>, keys_tensor: Option<Tensor<T>>, values: Option<Span<T>>, values_tensor: Option<Tensor<T>>) -> Tensor<T>;
/// ```
///
/// Maps each element in the input tensor to another value.
///
/// The mapping is determined by the two parallel attributes, 'keys_' and 'values_'.
/// The i-th value in the specified 'keys_' attribute would be mapped to the i-th value in the specified 'values_' attribute.
/// It implies that the input's element type and the element type of the specified 'keys_' should be identical, while the output type is identical to that of the specified 'values_' attribute.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `default_list`(`Option<Span<T>>`) - The default span.
/// * `default_tensor`(`Option<Tensor<T>>`) - The default tensor.
/// * `keys`(`Option<Span<T>>`) - The keys span.
/// * `keys_tensor`(`Option<Tensor<T>>`) - The keys tensor.
/// * `values`(` Option<Span<T>>`) - The values span.
/// * `values_tensor`(`Option<Tensor<T>>`) - The values tensor.
///
/// One and only one of the 'default_*' inputs should be set.
/// One and only one of the 'keys*' inputs should be set.
/// One and only one of the 'values*' inputs should be set.
///
/// ## Panics
///
/// * Panics if the len/shape of keys and values are not the same.
///
/// ## Returns
///
/// A new `Tensor<T>` which maps each element in the input tensor to another value.
///
/// ## Type Constraints
///
/// * `T` in (`Tensor<FP>`, `Tensor<i8>`, `Tensor<i32>`, `Tensor<u32>`)
///
/// ## Examples
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn data() -> Tensor<u32> {
/// let mut sizes = ArrayTrait::new();
/// sizes.append(2);
/// sizes.append(3);
/// let mut data = ArrayTrait::new();
/// data.append(1);
/// data.append(2);
/// data.append(3);
/// data.append(1);
/// data.append(4);
/// data.append(5);
///
/// let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
/// return tensor;
/// }
///
/// fn keys() -> Tensor<u32> {
/// let mut sizes = ArrayTrait::new();
/// sizes.append(3);
/// sizes.append(1);
///
/// let mut data = ArrayTrait::new();
/// data.append(1);
/// data.append(2);
/// data.append(1);
///
/// let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
/// return tensor;
/// }
///
/// fn values() -> Tensor<u32> {
/// let mut sizes = ArrayTrait::new();
/// sizes.append(3);
/// sizes.append(1);
///
/// let mut data = ArrayTrait::new();
/// data.append(8);
/// data.append(9);
/// data.append(7);
///
/// let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
/// return tensor;
/// }
///
/// fn default() -> Tensor<u32> {
/// let mut sizes = ArrayTrait::new();
/// sizes.append(1);
///
/// let mut data = ArrayTrait::new();
/// data.append(999);
///
/// let tensor = TensorTrait::<u32>::new(sizes.span(), data.span());
/// return tensor;
/// }
///
/// fn label_encoder_example() -> Tensor<u32> {
/// let data = data();
/// let keys = keys();
/// let values = values();
/// let default = default();
/// return data.label_encoder(default_list: Option::None, default_tensor: Option::Some(default),
/// keys: Option::None, keys_tensor: Option::Some(keys),
/// values: Option::None, values_tensor: Option::Some(values));
/// }
/// >>> [7, 9, 999, 7, 999, 999]
/// ```
///
fn label_encoder(
self: @Tensor<T>,
default_list: Option<Span<T>>,
default_tensor: Option<Tensor<T>>,
keys: Option<Span<T>>,
keys_tensor: Option<Tensor<T>>,
values: Option<Span<T>>,
values_tensor: Option<Tensor<T>>
) -> Tensor<T>;
}
/// Cf: TensorTrait::new docstring
fn new_tensor<T>(shape: Span<usize>, data: Span<T>) -> Tensor<T> {
check_shape::<T>(shape, data);
Tensor::<T> { shape, data }
}
/// Cf: TensorTrait::constant_of_shape docstring
fn constant_of_shape<T, impl FCopy: Copy<T>, impl FDrop: Drop<T>,>(
shape: Span<usize>, value: T
) -> Tensor<T> {
let mut data = ArrayTrait::new();
let mut length = len_from_shape(shape);
loop {
match length.into() {
0 => { break (); },
_ => {
data.append(value);
length -= 1;
}
}
};
Tensor::<T> { shape, data: data.span() }
}
/// Cf: TensorTrait::ravel_index docstring
fn ravel_index(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
assert(shape.len() == indices.len(), 'shape & indices length unequal');
let mut raveled_index: usize = 0;
let mut stride: usize = 1;
loop {
match shape.pop_back() {
Option::Some(i) => {
let index = *indices.pop_back().unwrap();
raveled_index += index * stride;
stride *= *i;
},
Option::None => { break; }
};
};
raveled_index
}
/// Cf: TensorTrait::unravel_index docstring
fn unravel_index(index: usize, mut shape: Span<usize>) -> Span<usize> {
assert(shape.len() > 0, 'shape cannot be empty');
let mut result = ArrayTrait::new();
let mut remainder = index;
let mut stride = len_from_shape(shape);
loop {
match shape.pop_front() {
Option::Some(i) => {
stride /= *i;
let coord = remainder / stride;
remainder = remainder % stride;
result.append(coord);
},
Option::None => { break; }
};
};
return result.span();
}
/// Cf: TensorTrait::stride docstring
fn stride(mut shape: Span<usize>) -> Span<usize> {
let mut strides = ArrayTrait::new();
let mut stride = 1;
loop {
match shape.pop_back() {
Option::Some(size) => {
strides.append(stride);
stride *= *size;
},
Option::None => { break; }
};
};
strides.reverse().span()
}
/// Cf: TensorTrait::reshape docstring
fn reshape<T, +Copy<Tensor<T>>>(
self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool
) -> Tensor<T> {
// Calculate the total number of elements in the original tensor
let mut total_elements = 1;
let mut shape = *self.shape;
loop {
match shape.pop_front() {
Option::Some(val) => total_elements *= *val,
Option::None => { break; }
};
};
// Calculate 'elements_so_far' and find 'inferred_index'
let mut elements_so_far = 1;
let mut inferred_index = Option::None;
let mut target_shape_clone = target_shape.clone();
let mut i: usize = 0;
loop {
match target_shape_clone.pop_front() {
Option::Some(dim) => {
if *dim == -1 {
if inferred_index.is_none() {
inferred_index = Option::Some(i);
} else {
panic!("Only one dimension can be inferred");
}
} else if *dim == 0 && allowzero == false {
// When allowzero is not set, copy the dimension size from the original tensor
if i >= (*self.shape).len() {
panic!("Dimension out of bounds for using original dimension value");
}
elements_so_far *= *(*self).shape.at(i);
} else if *dim >= 0 {
elements_so_far *= (*dim).try_into().unwrap();
} else {
panic!("Invalid dimension size");
};
},
Option::None => { break; }
};
i += 1;
};
let mut target_shape_clone = target_shape.clone();
let mut inferred_shape = ArrayTrait::<u32>::new();
i = 0; // Reset the index for the next loop
loop {
match target_shape_clone.pop_front() {
Option::Some(dim) => {
if *dim == -1 {
inferred_shape.append(total_elements / elements_so_far) // Inferred dimension
} else if *dim == 0 {
if allowzero == true {
inferred_shape
.append(
0
) // Explicitly set the dimension to zero when allowzero is enabled
} else if i < (*self.shape).len() {
inferred_shape
.append(
*(*self).shape.at(i)
) // Dimension unchanged from original when allowzero is not enabled
} else {
panic!("Dimension out of bounds for using original dimension value");
}
} else {
inferred_shape
.append((*dim).try_into().unwrap()) // Directly specified dimension
};
},
Option::None => { break; }
};
i += 1;
};
new_tensor(inferred_shape.span(), *self.data)
}
/// Cf: TensorTrait::at docstring
fn at_tensor<T>(self: @Tensor<T>, indices: Span<usize>) -> @T {
assert(indices.len() == (*self.shape).len(), 'indices not match dimensions');
let data = *self.data;
return data.at(ravel_index(*self.shape, indices));
}
// Returns true if two tensors are equal
fn tensor_eq<T, impl TPartialEq: PartialEq<T>>(mut lhs: Tensor<T>, mut rhs: Tensor<T>,) -> bool {
let mut is_eq = true;
loop {
if lhs.shape.len() == 0 || !is_eq {
break;
}
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
loop {
if lhs.data.len() == 0 || !is_eq {
break;
}
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
return is_eq;
}
/// Cf: TensorTrait::slice docstring
fn slice<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<T> {
let axes = match axes {
Option::Some(axes) => axes,
Option::None => {
let mut ret: Array<usize> = ArrayTrait::new();
let mut i: usize = 0;
let stop_i = starts.len() - 1;
loop {
ret.append(i);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
},
};
let steps = match steps {
Option::Some(steps) => steps,
Option::None => {
let mut ret: Array<usize> = ArrayTrait::new();
let mut i: usize = 0;
let stop_i = starts.len() - 1;
loop {
ret.append(1);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
},
};
assert(starts.len() == ends.len(), 'Ends and starts len unequal');
assert(starts.len() == axes.len(), 'Axes and starts len unequal');
assert(starts.len() == steps.len(), 'Steps and starts len unequal');
let mut is_empty: bool = false;
let mut output_shape: Array<usize> = ArrayTrait::new();
let mut processed_starts: Array<usize> = ArrayTrait::new();
let mut processed_ends: Array<usize> = ArrayTrait::new();
let mut processed_steps: Array<usize> = ArrayTrait::new();
let mut shape = *self.shape;
let mut i: usize = 0;
loop {
match shape.pop_front() {
Option::Some(ele) => {
let (axis_index, is_found) = match axes.index_of(i) {
Option::Some(axis_index) => (axis_index, true),
Option::None => (0, false),
};
let mut processed_params = (0, 0, 0, 0);
if is_found {
let mut start: usize = *ele;
let mut end: usize = *ele;
if *starts.at(axis_index) < *ele {
start = *starts.at(axis_index);
}
if *ele > *ends.at(axis_index) {
end = *ends.at(axis_index);
};
if start > *ele {
start = *ele;
};
if end > *ele {
end = *ele;
};
if start >= end {
is_empty = true;
} else {
let dim = (end - start + (*steps.at(axis_index) - 1))
/ *steps.at(axis_index);
if dim == 0 {
is_empty = true;
} else {
processed_params = (start, end, *steps.at(axis_index), dim);
};
};
} else {
processed_params = (0, *ele, 1, *ele);
}
let (start, end, step, shape) = processed_params;
processed_starts.append(start);
processed_ends.append(end);
processed_steps.append(step);
output_shape.append(shape);
i += 1;
},
Option::None => { break; }
};
};
let mut output_data: Array<T> = ArrayTrait::new();
if is_empty {
return Tensor::<T> { shape: output_shape.span(), data: output_data.span() };
}
let mut data = *self.data;
let mut j: usize = 0;
loop {
match data.pop_front() {
Option::Some(ele) => {
let mut indices = unravel_index(j, *self.shape);
let mut is_included = false;
let mut shape = *self.shape;
let mut starts = processed_starts.span();
let mut ends = processed_ends.span();
let mut steps = processed_steps.span();
loop {
match shape.pop_front() {
Option::Some => {
let start = *starts.pop_front().unwrap();
let end = *ends.pop_front().unwrap();
let step = *steps.pop_front().unwrap();
let index = *indices.pop_front().unwrap();
if index < start || index >= end {
is_included = false;
break ();
}
if (index - start) % step == 0 {
is_included = true;
} else {
is_included = false;
break ();
}
},
Option::None => { break; }
};
};
if is_included {
output_data.append(*ele);
}
j += 1;
},
Option::None => { break; }
};
};
return TensorTrait::new(output_shape.span(), output_data.span());
}
/// Cf: TensorTrait::nonzero docstring
fn nonzero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl TDrop: Drop<T>,
impl TCopy: Copy<T>,
impl TNumber: NumberTrait<T, MAG>
>(
self: @Tensor<T>
) -> Tensor<usize> {
let mut indexes_of_dimensions: Array<usize> = ArrayTrait::new();
let mut self_data_copy = *self.data;
let mut j: usize = 0;
loop {
match self_data_copy.pop_front() {
Option::Some(val) => {
if *val != NumberTrait::zero() {
let indices = unravel_index(j, *self.shape);
let mut i: usize = 0;
let mut self_shape_copy = *self.shape;
loop {
match self_shape_copy.pop_front() {
Option::Some => {
indexes_of_dimensions.append(*indices.at(i));
i += 1;
},
Option::None => { break (); }
};
};
}
j += 1;
},
Option::None => { break (); }
};
};
let indexes_of_dimensions_span = indexes_of_dimensions.span();
let mut output_data: Array<usize> = ArrayTrait::new();
if indexes_of_dimensions_span.len() == 0 {
return Tensor::<
usize
> { shape: array![(*self.shape).len(), 0].span(), data: output_data.span() };
}
let stop_k = (indexes_of_dimensions_span.len() / (*self.shape).len()) - 1;
let mut self_shape_copy = *self.shape;
let mut i: usize = 0;
loop {
match self_shape_copy.pop_front() {
Option::Some => {
let mut k: usize = 0;
loop {
output_data.append(*indexes_of_dimensions_span.at((*self.shape).len() * k + i));
if k == stop_k {
break ();
}
k += 1;
};
i += 1;
},
Option::None => { break (); }
};
};
return Tensor::<
usize
> { shape: array![(*self.shape).len(), stop_k + 1].span(), data: output_data.span() };
}
/// Cf: TensorTrait::squeeze docstring
fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<u32>>) -> Tensor<T> {
let target_shape = match axes {
Option::Some(mut axes) => {
let mut axis_squeezed = 0;
let mut shape = *self.shape;
loop {
match axes.pop_front() {
Option::Some(axis) => {
let mut reshape: Array<usize> = ArrayTrait::new();
let mut index = 0;
let axis = if *axis < 0 {
assert(
*axis <= (*self.shape).len().into(), 'axis out of accepted range'
);
(*self.shape).len().into() - *axis
} else {
assert(
*axis < (*self.shape).len().into(), 'axis out of accepted range'
);
*axis
};
loop {
match shape.pop_front() {
Option::Some(shape) => {
let squeezed = if axis >= axis_squeezed {
axis - axis_squeezed
} else {
axis
};
if index == squeezed {
assert(*shape == 1, 'shape entry not equal to one');
axis_squeezed += 1;
} else {
reshape.append(*shape);
}
},
Option::None => { break; },
};
index += 1;
};
shape = reshape.span();
},
Option::None => { break shape; },
};
}
},
Option::None => {
let mut reshape: Array<usize> = ArrayTrait::new();
let mut shape = *self.shape;
loop {
match shape.pop_front() {
Option::Some(shape) => { if *shape != 1 {
reshape.append(*shape);
} },
Option::None => { break reshape.span(); },
};
}
},
};
return Tensor::<T> { shape: target_shape, data: *self.data };
}
/// Cf: TensorTrait::unsqueeze docstring
fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
let dedupped_array = axes.dedup();
assert(dedupped_array.len() == axes.len(), 'Duplicated input axes');
let mut self_shape_copy = *self.shape;
let mut i: usize = 0;
let mut added_axes_count: usize = 0;
let mut output_shape: Array<usize> = ArrayTrait::new();
loop {
if axes.contains(i + added_axes_count) {
output_shape.append(1);
added_axes_count += 1;
} else {
match self_shape_copy.pop_front() {
Option::Some(val) => {
output_shape.append(*val);
i += 1;
},
Option::None => { break (); }
};
};
};
let mut j: usize = output_shape.len();
loop {
if axes.contains(j) {
output_shape.append(1);
} else {
break ();
}
j += 1;
};
assert(output_shape.len() == axes.len() + (*self.shape).len(), 'Invalid input axes');
return Tensor::<T> { shape: output_shape.span(), data: *self.data };
}
/// Cf: TensorTrait::sign docstring
fn sign<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TDrop: Drop<T>,
impl TCopy: Copy<T>,
>(
self: @Tensor<T>
) -> Tensor<T> {
let mut sign_data_array: Array<T> = ArrayTrait::new();
let mut data = *self.data;
loop {
match data.pop_front() {
Option::Some(data) => {
let sign_data = if *data == NumberTrait::zero() {
NumberTrait::zero()
} else if NumberTrait::is_neg(*data) {
NumberTrait::neg_one()
} else {
NumberTrait::one()
};
sign_data_array.append(sign_data);
},
Option::None => {
break Tensor::<T> { shape: *self.shape, data: sign_data_array.span() };
}
};
}
}
/// Cf: TensorTrait::clip docstring
fn clip<
T,
MAG,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TTensor: TensorTrait<T>,
impl TPartialOrd: PartialOrd<T>,
impl TNumber: NumberTrait<T, MAG>
>(
self: @Tensor<T>, min: Option<T>, max: Option<T>
) -> Tensor<T> {
let min = match min {
Option::Some(min) => min,
Option::None => { NumberTrait::min_value() },
};
let max = match max {
Option::Some(max) => max,
Option::None => { NumberTrait::max_value() },
};
let mut return_data: Array<T> = ArrayTrait::new();
let mut self_data_copy = *self.data;
loop {
match self_data_copy.pop_front() {
Option::Some(val) => {
if *val < min {
return_data.append(min);
} else if *val > max {
return_data.append(max);
} else {
return_data.append(*val);
}
},
Option::None => { break (); }
};
};
return Tensor::<T> { shape: *self.shape, data: return_data.span() };
}
/// Cf: TensorTrait::identity docstring
fn identity<T>(self: @Tensor<T>) -> Tensor<T> {
Tensor::<T> { shape: *self.shape, data: *self.data }
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/helpers.cairo | use alexandria_data_structures::array_ext::ArrayTraitExt;
use orion::utils::u32_max;
use orion::operators::tensor::{core::{Tensor, TensorTrait, stride}, BoolTensor};
/// Calculates the number of elements in a tensor given its shape.
///
/// # Arguments
/// * `shape` - A span containing the shape of the tensor as usize elements.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A usize representing the number of elements in the tensor.
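///
/// # Example
/// A minimal sketch of the expected behavior (the shape below is chosen for
/// illustration, not taken from the original docs):
/// ```rust
/// // A tensor of shape [2, 3, 4] holds 2 * 3 * 4 = 24 elements.
/// let count = len_from_shape(array![2, 3, 4].span());
/// assert(count == 24, 'expected 24 elements');
/// ```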
fn len_from_shape(mut shape: Span<usize>) -> usize {
let mut result: usize = 1;
loop {
match shape.pop_front() {
Option::Some(item) => { result *= *item; },
Option::None => { break; }
};
};
result
}
/// Verifies if the shape and the data array of a tensor are compatible.
///
/// # Arguments
/// * `shape` - A span containing the shape of the tensor as usize elements.
/// * `data` - A span containing the data elements of generic type T.
///
/// # Panics
/// * Panics if the shape and data array are incompatible.
fn check_shape<T>(shape: Span<usize>, data: Span<T>) {
assert(len_from_shape(shape) == data.len(), 'wrong tensor shape');
}
/// Checks if two tensor shapes are compatible for broadcasting.
///
/// # Arguments
/// * `shape_1` - A span containing the first tensor's shape as usize elements.
/// * `shape_2` - A span containing the second tensor's shape as usize elements.
///
/// # Panics
/// * Panics if the shapes are not compatible for broadcasting.
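///
/// # Example
/// An illustrative sketch (example shapes, not from the original docs):
/// ```rust
/// // [3, 1] and [1, 4] broadcast together (to [3, 4]), so this returns silently.
/// check_compatibility(array![3, 1].span(), array![1, 4].span());
/// // [2, 3] against [4, 3] would panic: 2 != 4 and neither dimension is 1.
/// ```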
fn check_compatibility(mut shape_1: Span<usize>, mut shape_2: Span<usize>) {
// Start from the last dimension by getting the length of each shape
let mut iter_1 = shape_1.len();
let mut iter_2 = shape_2.len();
// Iterate while there are dimensions left in either shape
while iter_1 > 0 || iter_2 > 0 {
// Get the current dimension for each shape, defaulting to 1 if we've run out of dimensions
let dim_1 = if iter_1 > 0 {
*shape_1[iter_1 - 1]
} else {
1
};
let dim_2 = if iter_2 > 0 {
*shape_2[iter_2 - 1]
} else {
1
};
// Check the broadcasting rule for the current dimension
if dim_1 != dim_2 && dim_1 != 1 && dim_2 != 1 {
panic(array!['tensors shape must match']);
}
// Move to the next dimension
if iter_1 > 0 {
iter_1 -= 1;
}
if iter_2 > 0 {
iter_2 -= 1;
}
}
}
/// Computes the index in the broadcasted tensor corresponding to the given indices and shape.
///
/// # Arguments
/// * `shape` - A span containing the shape of the tensor as usize elements.
/// * `indices` - A span containing the indices as usize elements.
///
/// # Panics
/// * Panics if shape and indices length are not equal.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A usize representing the index in the broadcasted tensor.
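///
/// # Example
/// An illustrative sketch (shapes and indices chosen for this example):
/// ```rust
/// // A [2, 1] tensor broadcast against [2, 4]: the broadcasted index [1, 3]
/// // wraps back (via the per-dimension modulo) to flat index 1 in the data.
/// let flat = broadcast_index_mapping(array![2, 1].span(), array![1, 3].span());
/// assert(flat == 1, 'expected flat index 1');
/// ```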
fn broadcast_index_mapping(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
if shape.len() == indices.len() {
broadcast_index_mapping_equal_shape(shape, indices)
} else {
broadcast_index_mapping_non_equal_shape(shape, indices)
}
}
fn broadcast_index_mapping_equal_shape(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
let mut result = 0_usize;
let mut stride = stride(shape);
loop {
match shape.pop_front() {
Option::Some(shape_val) => {
let indices_val = *indices.pop_front().unwrap();
let stride_val = *stride.pop_front().unwrap();
let index = (indices_val % *shape_val) * stride_val;
result += index;
},
Option::None => { break; }
};
};
result
}
fn broadcast_index_mapping_non_equal_shape(
mut shape: Span<usize>, mut indices: Span<usize>
) -> usize {
let mut result = 0_usize;
let mut stride = stride(shape.clone());
// Calculate the offset to align indices with the rightmost dimensions of the shape
let mut offset = if shape.len() > indices.len() {
shape.len() - indices.len()
} else {
0
};
loop {
match shape.pop_back() {
Option::Some(_) => {
let stride_val = stride
.pop_back()
.unwrap_or(@1); // Default stride for non-existent dimensions is 1
// Calculate the index, using 0 for dimensions beyond the length of indices
let index_val = if offset > 0 {
offset -= 1; // Decrement offset until we align indices with the shape
0 // Use 0 for indices beyond the length of the indices span
} else {
*indices
.pop_back()
.unwrap_or(@0) // Use actual index value or 0 if indices are exhausted
};
let index = index_val * *stride_val;
result += index;
},
Option::None => { break; }
};
};
result
}
/// Generates the output shape after reducing a tensor along a specified axis.
///
/// # Arguments
/// * `input_shape` - A span containing the input tensor's shape as usize elements.
/// * `axis` - A usize representing the axis to reduce.
///
/// # Panics
/// * Panics if input_shape is empty.
/// * Panic if the axis is not in the valid range of the input_shape dimensions.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Span of usize representing the output shape after reduction.
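///
/// # Example
/// A minimal sketch (example shape, not from the original docs):
/// ```rust
/// // Reducing shape [2, 3, 4] along axis 1 either drops or keeps that dimension.
/// let dropped = reduce_output_shape(array![2, 3, 4].span(), 1, false); // [2, 4]
/// let kept = reduce_output_shape(array![2, 3, 4].span(), 1, true); // [2, 1, 4]
/// ```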
fn reduce_output_shape(mut input_shape: Span<usize>, axis: usize, keepdims: bool) -> Span<usize> {
assert(axis < input_shape.len(), 'axis out of dimensions');
let mut output_shape: Array<u32> = array![];
let mut n: usize = 0;
loop {
match input_shape.pop_front() {
Option::Some(current_dim) => {
if n == axis {
if keepdims {
output_shape.append(1);
}
} else {
output_shape.append(*current_dim);
}
n += 1;
},
Option::None => { break; }
};
};
output_shape.span()
}
/// Helper function that computes the output shape of a tensor after applying the axes permutation.
///
/// # Arguments
/// * `input_shape` - A span containing the input tensor's shape as usize elements.
/// * `axes` - A span containing the usize elements representing the axes permutation.
///
/// # Panics
/// * Panics if shape and axes length are not equal.
/// * Panic if the axis value in axes is not in the valid range of the input_shape dimensions.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Span of usize representing the output shape after permutation.
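///
/// # Example
/// An illustrative sketch (example shape and axes):
/// ```rust
/// // Permuting shape [2, 3, 4] with axes [2, 0, 1] picks the dims in that order.
/// let permuted = permutation_output_shape(array![2, 3, 4].span(), array![2, 0, 1].span());
/// // permuted == [4, 2, 3]
/// ```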
fn permutation_output_shape(input_shape: Span<usize>, mut axes: Span<usize>) -> Span<usize> {
let axes_len = axes.len();
assert(input_shape.len() == axes_len, 'input_shape/indices len unequal');
let mut output_shape: Array<u32> = array![];
loop {
match axes.pop_front() {
Option::Some(item) => { output_shape.append(*input_shape[*item]); },
Option::None => { break; }
};
};
output_shape.span()
}
/// Combines output indices with the current index of the specified axis.
///
/// # Arguments
/// * `output_indices` - A span containing the output indices as usize elements.
/// * `axis_index` - A usize representing the current index of the specified axis.
/// * `axis` - A usize representing the specified axis.
///
/// # Panics
/// * Panics if the axis value is not in the range of the output_indices length.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Span of usize representing the combined indices.
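///
/// # Example
/// A minimal sketch (example values, not from the original docs):
/// ```rust
/// // Re-inserting axis index 5 at axis position 1 into the output indices [1, 2].
/// let combined = combine_indices(array![1, 2].span(), 5, 1);
/// // combined == [1, 5, 2]
/// ```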
fn combine_indices(mut output_indices: Span<usize>, axis_index: usize, axis: usize) -> Span<usize> {
assert(axis <= output_indices.len(), 'axis value is out of range');
let mut result: Array<u32> = array![];
let mut n: usize = 0;
while n != output_indices.len() + 1 {
if n == axis {
result.append(axis_index);
} else if n > axis {
result.append(*output_indices[n - 1_usize]);
} else {
result.append(*output_indices[n]);
}
n += 1;
};
result.span()
}
/// Helper function that finds the index of a target axis in the given axes array.
///
/// # Arguments
/// * `axes` - A span containing the usize elements representing the axes.
/// * `target_axis` - A usize representing the target axis.
///
/// # Panics
/// * Panics if the target_axis value is not in the range of the axes dimensions.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A usize representing the index of the target axis in the given axes array.
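///
/// # Example
/// An illustrative sketch (example permutation):
/// ```rust
/// // In the axes permutation [2, 0, 1], the target axis 0 sits at position 1.
/// let pos = find_axis(array![2, 0, 1].span(), 0);
/// assert(pos == 1, 'expected position 1');
/// ```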
fn find_axis(mut axes: Span<usize>, target_axis: usize) -> usize {
assert(target_axis < axes.len(), 'target_axis is out of range');
let mut axis: usize = 0;
loop {
match axes.pop_front() {
Option::Some(item) => {
if *item == target_axis {
break ();
}
axis += 1;
},
Option::None => { break; }
};
};
axis
}
/// Computes the broadcasted shape of two tensors.
///
/// # Arguments
/// * `shape1` - A span containing the shape of the first tensor as usize elements.
/// * `shape2` - A span containing the shape of the second tensor as usize elements.
///
/// # Panics
/// * Panics if the shapes of the tensors are not compatible.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Span of usize representing the broadcasted shape.
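///
/// # Example
/// A minimal sketch (example shapes, not from the original docs):
/// ```rust
/// // [3, 1] and [1, 4] broadcast to [3, 4]; missing leading dims default to 1.
/// let shape = broadcast_shape(array![3, 1].span(), array![1, 4].span());
/// // shape == [3, 4]
/// ```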
fn broadcast_shape(mut shape1: Span<usize>, mut shape2: Span<usize>) -> Span<usize> {
check_compatibility(shape1, shape2);
let mut result: Array<usize> = array![];
while !shape1.is_empty() || !shape2.is_empty() {
let dim1 = *shape1.pop_back().unwrap_or(@1);
let dim2 = *shape2.pop_back().unwrap_or(@1);
let broadcasted_dim = u32_max(dim1, dim2);
result.append(broadcasted_dim);
};
result.reverse().span()
}
/// Substitute a value in a shape at a given index
///
/// # Arguments
///
/// * `shape` - The shape to modify
/// * `index` - The index to modify
/// * `value` - The value to insert
///
/// # Panics
/// * Panics if the index is out of bounds
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * `Span<usize>` - The modified shape
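///
/// # Example
/// An illustrative sketch (example shape):
/// ```rust
/// // Substituting the value 7 at index 1 of the shape [2, 3, 4].
/// let modified = replace_index(array![2, 3, 4].span(), 1, 7);
/// // modified == [2, 7, 4]
/// ```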
fn replace_index(mut shape: Span<usize>, index: usize, value: usize) -> Span<usize> {
let mut output: Array<u32> = array![];
let mut i = 0;
loop {
match shape.pop_front() {
Option::Some(item) => {
if i == index {
output.append(value);
} else {
output.append(*item);
};
i += 1;
},
Option::None => { break; }
};
};
output.span()
}
/// Creates a list of all axes of given shape
///
/// # Arguments
///
/// * `shape` - A span containing the input tensor's shape as usize elements.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * `Span<usize>` - A span containing the usize elements representing the axes.
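///
/// # Example
/// A minimal sketch (example shape):
/// ```rust
/// // A rank-3 shape yields the axes [0, 1, 2].
/// let axes = get_all_axes(array![2, 3, 4].span());
/// // axes == [0, 1, 2]
/// ```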
fn get_all_axes(shape: Span<usize>) -> Span<usize> {
let mut ret: Array<usize> = array![];
let mut i: usize = 0;
let stop_i = shape.len() - 1;
loop {
ret.append(i);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
}
/// Flatten a given array of tensors into an Array<T>.
fn flatten_array_of_tensors<T, +Copy<T>, +Drop<T>,>(
tensors: Array<Tensor<T>>, axis: usize, new_shape: Span<usize>
) -> Span<T> {
let mut new_stride = stride(new_shape);
let mut flattened: Array<T> = array![];
let stride_lim: usize = *new_stride.at(axis);
let max_row = (*(*tensors.at(0).shape).at(0));
let mut row = 0;
while row != max_row {
let mut tensors_span = tensors.span();
loop {
let mut i = 0;
match tensors_span.pop_front() {
Option::Some(mut t) => {
let mut data = *t.data;
while i != stride_lim {
let idx = i + (row * stride_lim);
flattened.append(*data.at(idx));
i += 1;
}
},
Option::None => { break; },
}
};
row += 1;
};
flattened.span()
}
/// Convert a Tensor to an array of tensors along a given axis.
fn as_tensors_array<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
tensor: @Tensor<T>, axis: usize
) -> Array<Tensor<T>> {
let shape = *tensor.shape;
let rank = shape.len();
let mut as_tensors: Array<Tensor<T>> = array![];
let mut axes: Array<usize> = array![];
let mut idx: usize = 0;
while idx != rank {
axes.append(idx);
idx += 1;
};
idx = 0;
let axis_len: usize = *shape.at(axis);
while idx != axis_len {
let mut starts: Array<usize> = array![];
let mut ends: Array<usize> = array![];
let mut i: usize = 0;
while i != rank {
starts.append(if i == axis {
idx
} else {
0
});
ends.append(if i == axis {
idx + 1
} else {
*shape.at(i)
});
i += 1;
};
let sub_tensor: Tensor<T> = tensor
.slice(
starts: starts.span(),
ends: ends.span(),
axes: Option::Some(axes.span()),
steps: Option::None(())
);
as_tensors.append(sub_tensor);
idx += 1;
};
as_tensors
}
/// Compares two Spans of generic type T.
///
/// # Returns
/// an i8 type containing:
/// * 1 if the left operand is greater than the right,
/// * 0 if the left operand is equal to the right,
/// * -1 if the left operand is lower than the right,
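///
/// # Example
/// An illustrative sketch (example spans, not from the original docs):
/// ```rust
/// // Spans are compared element by element from the front.
/// let cmp = span_cmp(array![1_u32, 2].span(), array![1_u32, 3].span());
/// assert(cmp == -1, 'lhs is lower');
/// ```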
fn span_cmp<T, +Drop<T>, +Copy<T>, +PartialEq<T>, +PartialOrd<T>>(
lhs: Span<T>, rhs: Span<T>
) -> i8 {
let mut rhs = rhs;
let mut lhs = lhs;
let mut ret: i8 = 0;
loop {
match lhs.pop_front() {
Option::Some(l) => {
match rhs.pop_front() {
Option::Some(r) => { if l != r {
ret = if *l > *r {
1
} else {
-1
};
break;
} },
Option::None => {
ret = 1;
break;
},
}
},
Option::None => {
// lhs is exhausted: if rhs still has elements, lhs is shorter and
// therefore lower; otherwise the spans are equal and ret stays 0.
if rhs.pop_front().is_some() {
ret = -1;
}
break;
}
};
};
ret
}
/// Implements PartialOrd for two spans of generic type T.
impl SpanPartialOrd<T, +Drop<T>, +Copy<T>, +PartialEq<T>, +PartialOrd<T>> of PartialOrd<Span<T>> {
fn ge(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) >= 0
}
fn gt(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) > 0
}
fn le(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) <= 0
}
fn lt(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) < 0
}
}
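// A minimal sketch of the lexicographic ordering provided above (module name
// and values are illustrative): the first differing element decides, a strict
// prefix compares as smaller, and equal spans compare as equal.
#[cfg(test)]
mod span_cmp_example {
    use super::span_cmp;

    #[test]
    fn orders_lexicographically() {
        assert(span_cmp(array![1_u32, 2].span(), array![1_u32, 3].span()) == -1, 'first diff');
        assert(span_cmp(array![2_u32].span(), array![1_u32, 9].span()) == 1, 'head decides');
        assert(span_cmp(array![1_u32].span(), array![1_u32, 2].span()) == -1, 'prefix lt');
        assert(span_cmp(array![1_u32, 2].span(), array![1_u32, 2].span()) == 0, 'equal');
    }
}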
/// Returns true if (1) the input is an optional-type and contains an element,
/// or (2) the input is a tensor type.
/// If the input is not provided or is an empty optional-type, this op returns false.
///
/// # Arguments
/// * `x` - The optional input.
///
/// # Returns
/// * A scalar boolean tensor.
/// If true, it indicates that optional-type input contains an element. Otherwise, it is empty.
fn optional_has_element<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
x: Option<Tensor<T>>
) -> Tensor<bool> {
match x {
Option::Some => {
let mut shape: Array<usize> = array![];
shape.append(1);
let mut data: Array<bool> = array![];
data.append(true);
TensorTrait::new(shape.span(), data.span())
},
Option::None => {
let mut shape: Array<usize> = array![];
shape.append(1);
let mut data: Array<bool> = array![];
data.append(false);
TensorTrait::new(shape.span(), data.span())
}
}
}
/// If the input is a tensor type, it returns the input.
/// If the input is an optional type, it outputs the element in the input.
///
/// # Arguments
/// * `x` - The optional input.
///
/// # Panics
/// * Panics if the input is an empty optional-type (i.e. it does not contain an element).
///
/// # Returns
/// * Output element in the optional input.
fn optional_get_element<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
x: Option<Tensor<T>>
) -> Tensor<T> {
match x {
Option::Some(ele) => { ele },
Option::None => { panic(array!['The input is an empty', 'optional-type.']) }
}
}
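// A minimal sketch of the two helpers above (assuming the `U32Tensor`
// implementation is available; module name and values are illustrative): a
// Some-wrapped tensor reports true and round-trips through
// `optional_get_element`, while a None input reports false.
#[cfg(test)]
mod optional_helpers_example {
    use super::{optional_has_element, optional_get_element};
    use orion::operators::tensor::core::{Tensor, TensorTrait};
    use orion::operators::tensor::implementations::tensor_u32::U32Tensor;

    #[test]
    fn some_and_none() {
        let t = TensorTrait::new(array![1_usize].span(), array![42_u32].span());
        assert(*optional_has_element(Option::Some(t)).data.at(0), 'expected true');
        assert(*optional_get_element(Option::Some(t)).data.at(0) == 42, 'round trip');
        let empty: Option<Tensor<u32>> = Option::None;
        assert(!*optional_has_element(empty).data.at(0), 'expected false');
    }
}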
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations.cairo | mod tensor_bool;
mod tensor_u32;
mod tensor_i8;
mod tensor_i32;
mod tensor_fp8x23;
mod tensor_fp16x16;
mod tensor_fp64x64;
mod tensor_fp32x32;
mod tensor_fp16x16wide;
mod tensor_fp8x23wide;
mod tensor_complex64;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_bool.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{
constant_of_shape, new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_ops, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
impl BoolTensor of TensorTrait<bool> {
fn new(shape: Span<usize>, data: Span<bool>) -> Tensor<bool> {
new_tensor(shape, data)
}
fn at(self: @Tensor<bool>, indices: Span<usize>) -> bool {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sub(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn mul(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn div(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn min_in_tensor(self: @Tensor<bool>) -> bool {
panic(array!['not supported!'])
}
fn min(tensors: Span<Tensor<bool>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn max_in_tensor(self: @Tensor<bool>) -> bool {
panic(array!['not supported!'])
}
fn max(tensors: Span<Tensor<bool>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn stride(self: @Tensor<bool>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<bool>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<bool>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<bool>, target_shape: Span<i32>, allowzero: bool) -> Tensor<bool> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<bool>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_prod(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn argmax(
self: @Tensor<bool>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn argmin(
self: @Tensor<bool>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn transpose(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn exp(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn greater_equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn less(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn less_equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn abs(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn neg(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn ceil(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<bool>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn flatten(self: @Tensor<bool>, axis: usize) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sinh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn or(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn acos(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<bool>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<bool>>, axis: usize,) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn quantize_linear(
self: @Tensor<bool>, y_scale: @Tensor<bool>, y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<bool>, x_zero_point: @Tensor<bool>
) -> Tensor::<bool> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<bool>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<bool> {
core_ops::slice::<bool>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<bool>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<bool> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn squeeze(self: @Tensor<bool>, axes: Option<Span<usize>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn unsqueeze(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sign(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn clip(self: @Tensor<bool>, min: Option<bool>, max: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<bool>) -> Tensor<bool> {
core_ops::identity(self)
}
fn where(self: @Tensor<bool>, x: @Tensor<bool>, y: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<bool>) -> Tensor<bool> {
math::not::not(*self)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<bool>>,
zero_points: Span<Tensor<bool>>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<bool>, a_zero_point: @Tensor<bool>, alpha: bool,
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn round(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn scatter(
self: @Tensor<bool>,
updates: Tensor<bool>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn trilu(self: @Tensor<bool>, upper: bool, k: i64) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_xor(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_or(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_l1(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_l2(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn constant_of_shape(shape: Span<usize>, value: bool) -> Tensor<bool> {
constant_of_shape(shape, value)
}
fn gather_elements(
self: @Tensor<bool>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<bool> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<bool>, bias: Option<bool>, lambd: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<bool>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn binarizer(self: @Tensor<bool>, threshold: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn array_feature_extractor(self: @Tensor<bool>, indices: Tensor<usize>) -> Tensor<bool> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn reduce_min(
self: @Tensor<bool>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn pow(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<bool>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_nan(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
        panic(array!['not supported!'])
}
fn unique(
self: @Tensor<bool>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<bool>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<bool>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<bool> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn compress(
self: @Tensor<bool>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<bool> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<bool>,
scale: @Tensor<bool>,
B: Option<@Tensor<bool>>,
axis: Option<i32>,
epsilon: Option<bool>,
stash_type: Option<usize>,
) -> (Tensor<bool>, Tensor<bool>, Tensor<bool>) {
panic(array!['not supported!'])
}
fn resize(
self: @Tensor<bool>,
roi: Option<Tensor<bool>>,
scales: Option<Span<bool>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<bool>,
exclude_outside: Option<bool>,
extrapolation_value: Option<bool>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<bool>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<bool>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn split_to_sequence(
self: @Tensor<bool>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<bool>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<bool>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<bool> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<bool>) -> Option<Tensor<bool>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<bool>
) -> (Tensor::<u32>, Tensor::<bool>, Tensor<bool>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<bool>, updates: Tensor<bool>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn range(start: bool, end: bool, step: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn hann_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn hamming_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn blackman_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn random_uniform_like(
tensor: @Tensor<bool>, high: Option<bool>, low: Option<bool>, seed: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn label_encoder(
self: @Tensor<bool>,
default_list: Option<Span<bool>>,
default_tensor: Option<Tensor<bool>>,
keys: Option<Span<bool>>,
keys_tensor: Option<Tensor<bool>>,
values: Option<Span<bool>>,
values_tensor: Option<Tensor<bool>>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
}
/// Implements partial equal for two `Tensor<bool>` using the `PartialEq` trait.
impl BoolTensorPartialEq of PartialEq<Tensor<bool>> {
fn eq(lhs: @Tensor<bool>, rhs: @Tensor<bool>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<bool>, rhs: @Tensor<bool>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl BoolTryIntobool of TryInto<bool, bool> {
fn try_into(self: bool) -> Option<bool> {
Option::Some(self)
}
}
// Internals
fn tensor_eq(mut lhs: Tensor<bool>, mut rhs: Tensor<bool>,) -> bool {
let mut is_eq = true;
    while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
}
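// A minimal sketch of the equality semantics above (module name and values are
// illustrative; the shape loop assumes equal-rank inputs): shapes are compared
// first, then data, short-circuiting on the first mismatch.
#[cfg(test)]
mod bool_tensor_eq_example {
    use super::{BoolTensor, BoolTensorPartialEq};
    use orion::operators::tensor::core::TensorTrait;

    #[test]
    fn equal_and_unequal() {
        let shape = array![2_usize].span();
        let a = TensorTrait::new(shape, array![true, false].span());
        let b = TensorTrait::new(shape, array![true, false].span());
        let c = TensorTrait::new(shape, array![true, true].span());
        assert(a == b, 'expected equal');
        assert(a != c, 'expected not equal');
    }
}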
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_complex64.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP64x64, FP64x64Impl};
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::complex_number::complex_trait::ComplexTrait;
use orion::numbers::complex_number::complex64::{Complex64Impl, complex64};
impl Complex64Tensor of TensorTrait<complex64> {
fn new(shape: Span<usize>, data: Span<complex64>) -> Tensor<complex64> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: complex64) -> Tensor<complex64> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<complex64>, indices: Span<usize>) -> complex64 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<complex64>) -> complex64 {
panic(array!['not supported!'])
}
fn min(tensors: Span<Tensor<complex64>>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn max_in_tensor(self: @Tensor<complex64>) -> complex64 {
panic(array!['not supported!'])
}
fn max(tensors: Span<Tensor<complex64>>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn stride(self: @Tensor<complex64>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<complex64>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<complex64>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(
self: @Tensor<complex64>, target_shape: Span<i32>, allowzero: bool
) -> Tensor<complex64> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<complex64>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<complex64>,
axis: i32,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn argmin(
self: @Tensor<complex64>,
axis: usize,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn transpose(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<complex64>) -> Tensor<complex64> {
math::exp::exp(*self)
}
fn log(self: @Tensor<complex64>) -> Tensor<complex64> {
math::log::log(*self)
}
fn equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn greater_equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn less(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn less_equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn abs(self: @Tensor<complex64>) -> Tensor<complex64> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn ceil(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<complex64>) -> Tensor<complex64> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<complex64>) -> Tensor<complex64> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<complex64>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<complex64> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<complex64>, axis: usize) -> Tensor<complex64> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<complex64>) -> Tensor<complex64> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn or(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn acos(self: @Tensor<complex64>) -> Tensor<complex64> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<complex64>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<complex64>>, axis: usize,) -> Tensor<complex64> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<complex64>, y_scale: @Tensor<complex64>, y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<complex64>, x_zero_point: @Tensor<complex64>
) -> Tensor::<complex64> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<complex64>>,
zero_points: Span<Tensor<complex64>>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
alpha: complex64
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<complex64>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<complex64> {
core_tensor::slice::<complex64>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<complex64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<complex64> {
math::gather::gather(self, indices, axis)
}
fn gather_nd(
self: @Tensor<complex64>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<complex64> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn nonzero(self: @Tensor<complex64>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<complex64>, axes: Option<Span<usize>>) -> Tensor<complex64> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn clip(
self: @Tensor<complex64>, min: Option<complex64>, max: Option<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<complex64>) -> Tensor<complex64> {
core_tensor::identity(self)
}
fn where(
self: @Tensor<complex64>, x: @Tensor<complex64>, y: @Tensor<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_and(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_xor(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_or(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn round(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_l1(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(
self: @Tensor<complex64>, indices: Tensor<usize>
) -> Tensor<complex64> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<complex64>, threshold: Option<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_sum_square(
self: @Tensor<complex64>, axis: usize, keepdims: bool
) -> Tensor<complex64> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_l2::reduce_l2_complex(self, axis, keepdims)
}
fn trilu(self: @Tensor<complex64>, upper: bool, k: i64) -> Tensor<complex64> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<complex64>,
updates: Tensor<complex64>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<complex64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<complex64> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<complex64>, bias: Option<complex64>, lambd: Option<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn pow(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<complex64>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_nan(self: @Tensor<complex64>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn erf(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<complex64>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<complex64>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn compress(
self: @Tensor<complex64>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<complex64> {
math::compress::compress(self, condition, axis)
}
fn reduce_log_sum_exp(
self: @Tensor<complex64>, axis: usize, keepdims: bool
) -> Tensor<complex64> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn layer_normalization(
self: @Tensor<complex64>,
scale: @Tensor<complex64>,
B: Option<@Tensor<complex64>>,
axis: Option<i32>,
epsilon: Option<complex64>,
stash_type: Option<usize>,
) -> (Tensor<complex64>, Tensor<complex64>, Tensor<complex64>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<complex64>,
axis: usize,
num_outputs: Option<usize>,
spl: Option<Tensor<usize>>
) -> Array<Tensor<complex64>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn reverse_sequence(
self: @Tensor<complex64>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<complex64> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn resize(
self: @Tensor<complex64>,
roi: Option<Tensor<complex64>>,
scales: Option<Span<complex64>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<complex64>,
exclude_outside: Option<bool>,
extrapolation_value: Option<complex64>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn random_uniform_like(
tensor: @Tensor<complex64>,
high: Option<complex64>,
low: Option<complex64>,
seed: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn range(start: complex64, end: complex64, step: complex64) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn hann_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn hamming_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn blackman_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<complex64>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<complex64>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn optional(self: @Tensor<complex64>) -> Option<Tensor<complex64>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<complex64>
) -> (Tensor::<u32>, Tensor::<complex64>, Tensor<complex64>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<complex64>,
updates: Tensor<complex64>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn label_encoder(
self: @Tensor<complex64>,
default_list: Option<Span<complex64>>,
default_tensor: Option<Tensor<complex64>>,
keys: Option<Span<complex64>>,
keys_tensor: Option<Tensor<complex64>>,
values: Option<Span<complex64>>,
values_tensor: Option<Tensor<complex64>>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
}
/// Implements addition for `Tensor<complex64>` using the `Add` trait.
impl Complex64TensorAdd of Add<Tensor<complex64>> {
/// Adds two `Tensor<complex64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<complex64>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<complex64>` using the `Sub` trait.
impl Complex64TensorSub of Sub<Tensor<complex64>> {
/// Subtracts two `Tensor<complex64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<complex64>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<complex64>` using the `Mul` trait.
impl Complex64TensorMul of Mul<Tensor<complex64>> {
/// Multiplies two `Tensor<complex64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<complex64>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<complex64>` using the `Div` trait.
impl Complex64TensorDiv of Div<Tensor<complex64>> {
/// Divides two `Tensor<complex64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<complex64>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::div(@lhs, @rhs)
}
}
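// A minimal sketch of the operator overloads above (module name and values are
// illustrative): `+` on two complex64 tensors dispatches to the element-wise
// `math::arithmetic::add`.
#[cfg(test)]
mod complex64_ops_example {
    use super::{Complex64Tensor, Complex64TensorAdd};
    use orion::operators::tensor::core::TensorTrait;
    use orion::numbers::complex_number::complex64::Complex64Impl;
    use orion::numbers::FP64x64Impl;

    #[test]
    fn elementwise_add() {
        let one = Complex64Impl::new(
            FP64x64Impl::new_unscaled(1, false), FP64x64Impl::new_unscaled(0, false)
        );
        let shape = array![1_usize].span();
        let c = TensorTrait::new(shape, array![one].span())
            + TensorTrait::new(shape, array![one].span());
        // (1 + 0i) + (1 + 0i) = 2 + 0i.
        assert((*c.data.at(0)).real == FP64x64Impl::new_unscaled(2, false), 'real part');
    }
}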
/// Implements partial equal for two `Tensor<complex64>` using the `PartialEq` trait.
impl Complex64TensorPartialEq of PartialEq<Tensor<complex64>> {
fn eq(lhs: @Tensor<complex64>, rhs: @Tensor<complex64>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<complex64>, rhs: @Tensor<complex64>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
// Internals
fn eq(lhs: @complex64, rhs: @complex64) -> bool {
let eq = (*lhs.real == *rhs.real) && (*lhs.img == *rhs.img);
eq
}
fn tensor_eq(mut lhs: Tensor<complex64>, mut rhs: Tensor<complex64>,) -> bool {
let mut is_eq = true;
    while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp16x16.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP16x16, I8IntoFP16x16};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp16x16::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W;
impl FP16x16Tensor of TensorTrait<FP16x16> {
fn new(shape: Span<usize>, data: Span<FP16x16>) -> Tensor<FP16x16> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP16x16) -> Tensor<FP16x16> {
constant_of_shape(shape, value)
}
fn add(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::div(@lhs, @rhs)
}
fn at(self: @Tensor<FP16x16>, indices: Span<usize>) -> FP16x16 {
*at_tensor(self, indices)
}
fn min_in_tensor(self: @Tensor<FP16x16>) -> FP16x16 {
math::min_in_tensor::min_in_tensor::<FP16x16, u32>(*self.data)
}
fn min(tensors: Span<Tensor<FP16x16>>) -> Tensor<FP16x16> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP16x16>) -> FP16x16 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP16x16>>) -> Tensor<FP16x16> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP16x16>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP16x16>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP16x16>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP16x16>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP16x16> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP16x16>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP16x16>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP16x16>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP16x16>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP16x16> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP16x16>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP16x16> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP16x16>>, axis: usize,) -> Tensor<FP16x16> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP16x16>, y_scale: @Tensor<FP16x16>, y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP16x16>, x_zero_point: @Tensor<FP16x16>
) -> Tensor::<FP16x16> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP16x16>>,
zero_points: Span<Tensor<FP16x16>>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP16x16>, a_zero_point: @Tensor<FP16x16>, alpha: FP16x16
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP16x16>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP16x16> {
core_tensor::slice::<FP16x16>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP16x16>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP16x16>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP16x16>, axes: Option<Span<usize>>) -> Tensor<FP16x16> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP16x16>, min: Option<FP16x16>, max: Option<FP16x16>) -> Tensor<FP16x16> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP16x16>, x: @Tensor<FP16x16>, y: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP16x16>, indices: Tensor<usize>) -> Tensor<FP16x16> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP16x16>, threshold: Option<FP16x16>) -> Tensor<FP16x16> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP16x16>, upper: bool, k: i64) -> Tensor<FP16x16> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP16x16>,
updates: Tensor<FP16x16>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn not(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP16x16>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP16x16>, bias: Option<FP16x16>, lambd: Option<FP16x16>
) -> Tensor<FP16x16> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP16x16>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP16x16>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP16x16>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP16x16> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP16x16>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP16x16>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP16x16>,
scale: @Tensor<FP16x16>,
B: Option<@Tensor<FP16x16>>,
axis: Option<i32>,
epsilon: Option<FP16x16>,
stash_type: Option<usize>,
) -> (Tensor<FP16x16>, Tensor<FP16x16>, Tensor<FP16x16>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP16x16>,
roi: Option<Tensor<FP16x16>>,
scales: Option<Span<FP16x16>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP16x16>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP16x16>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP16x16> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP16x16>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP16x16> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP16x16>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP16x16>, high: Option<FP16x16>, low: Option<FP16x16>, seed: Option<usize>
) -> Tensor<FP16x16> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP16x16, end: FP16x16, step: FP16x16) -> Tensor<FP16x16> {
math::range::range(start, end, step)
}
fn hann_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::hann_window::hann_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::hamming_window::hamming_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::blackman_window::blackman_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP16x16>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP16x16>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP16x16> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP16x16>) -> Option<Tensor<FP16x16>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP16x16>
) -> (Tensor::<u32>, Tensor::<FP16x16>, Tensor<FP16x16>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP16x16>,
updates: Tensor<FP16x16>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP16x16>,
default_list: Option<Span<FP16x16>>,
default_tensor: Option<Tensor<FP16x16>>,
keys: Option<Span<FP16x16>>,
keys_tensor: Option<Tensor<FP16x16>>,
values: Option<Span<FP16x16>>,
values_tensor: Option<Tensor<FP16x16>>
) -> Tensor<FP16x16> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP16x16>` using the `Add` trait.
impl FP16x16TensorAdd of Add<Tensor<FP16x16>> {
/// Adds two `Tensor<FP16x16>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP16x16>` using the `Sub` trait.
impl FP16x16TensorSub of Sub<Tensor<FP16x16>> {
/// Subtracts two `Tensor<FP16x16>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP16x16>` using the `Mul` trait.
impl FP16x16TensorMul of Mul<Tensor<FP16x16>> {
/// Multiplies two `Tensor<FP16x16>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP16x16>` using the `Div` trait.
impl FP16x16TensorDiv of Div<Tensor<FP16x16>> {
/// Divides two `Tensor<FP16x16>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equal for two `Tensor<FP16x16>` using the `PartialEq` trait.
impl FP16x16TensorPartialEq of PartialEq<Tensor<FP16x16>> {
fn eq(lhs: @Tensor<FP16x16>, rhs: @Tensor<FP16x16>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP16x16>, rhs: @Tensor<FP16x16>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl TensorI8IntoTensorFP16x16 of Into<Tensor<i8>, Tensor<FP16x16>> {
fn into(self: Tensor<i8>) -> Tensor<FP16x16> {
tensor_i8_to_tensor_fp16x16(@self)
}
}
/// Implements partial ord for two `Tensor<FP16x16>` using `PartialOrd` trait.
impl FP16x16TensorPartialOrd of PartialOrd<Tensor<FP16x16>> {
#[inline(always)]
fn ge(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u32 = 589; // 0.009
fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
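// A worked sketch of the tolerance above (module name and values are
// illustrative): PRECISION is 589 / 2^16, roughly 0.009, so `relative_eq`
// accepts a relative error of up to about 0.9% (an absolute error of 0.009
// when `lhs` is zero).
#[cfg(test)]
mod relative_eq_example {
    use super::relative_eq;
    use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16Impl;

    #[test]
    fn tolerates_small_relative_error() {
        let a = FP16x16Impl::new_unscaled(1000, false);
        // 1000 vs 1001: 0.1% relative error, inside the tolerance.
        assert(relative_eq(@a, @FP16x16Impl::new_unscaled(1001, false)), 'should match');
        // 1000 vs 1100: 10% relative error, outside the tolerance.
        assert(!relative_eq(@a, @FP16x16Impl::new_unscaled(1100, false)), 'should differ');
    }
}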
fn tensor_eq(mut lhs: Tensor<FP16x16>, mut rhs: Tensor<FP16x16>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp16x16(x: @Tensor<i8>) -> Tensor<FP16x16> {
let mut result_data = ArrayTrait::<FP16x16>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
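// A minimal sketch of the i8 -> FP16x16 promotion above (assuming the
// `I8Tensor` implementation is available; module name and values are
// illustrative): each i8 keeps its integer value, re-encoded as fixed point.
#[cfg(test)]
mod i8_into_fp16x16_example {
    use super::TensorI8IntoTensorFP16x16;
    use orion::operators::tensor::core::{Tensor, TensorTrait};
    use orion::operators::tensor::implementations::tensor_i8::I8Tensor;
    use orion::numbers::FP16x16;

    #[test]
    fn promotes_values() {
        let t = TensorTrait::new(array![2_usize].span(), array![3_i8, -2_i8].span());
        let f: Tensor<FP16x16> = t.into();
        assert(*f.data.at(0) == FP16x16 { mag: 3 * 65536, sign: false }, 'positive');
        assert(*f.data.at(1) == FP16x16 { mag: 2 * 65536, sign: true }, 'negative');
    }
}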
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp16x16wide.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP16x16W};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16IntoFP16x16W
};
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
impl FP16x16WTensor of TensorTrait<FP16x16W> {
fn new(shape: Span<usize>, data: Span<FP16x16W>) -> Tensor<FP16x16W> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP16x16W) -> Tensor<FP16x16W> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP16x16W>, indices: Span<usize>) -> FP16x16W {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP16x16W>) -> FP16x16W {
math::min_in_tensor::min_in_tensor::<FP16x16W, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP16x16W>>) -> Tensor<FP16x16W> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP16x16W>) -> FP16x16W {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP16x16W>>) -> Tensor<FP16x16W> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP16x16W>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP16x16W>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP16x16W>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP16x16W>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP16x16W> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP16x16W>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP16x16W>,
axis: i32,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP16x16W>,
axis: usize,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP16x16W>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP16x16W> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP16x16W>, axis: usize) -> Tensor<FP16x16W> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP16x16W>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP16x16W>>, axis: usize,) -> Tensor<FP16x16W> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP16x16W>, y_scale: @Tensor<FP16x16W>, y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP16x16W>, x_zero_point: @Tensor<FP16x16W>
) -> Tensor::<FP16x16W> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP16x16W>>,
zero_points: Span<Tensor<FP16x16W>>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
alpha: FP16x16W
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<FP16x16W>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP16x16W> {
core_tensor::slice::<FP16x16W>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP16x16W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16W> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP16x16W>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP16x16W>, axes: Option<Span<usize>>) -> Tensor<FP16x16W> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sign::sign(*self)
}
fn clip(
self: @Tensor<FP16x16W>, min: Option<FP16x16W>, max: Option<FP16x16W>
) -> Tensor<FP16x16W> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
core_tensor::identity(self)
}
fn where(
self: @Tensor<FP16x16W>, x: @Tensor<FP16x16W>, y: @Tensor<FP16x16W>
) -> Tensor<FP16x16W> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP16x16W>, upper: bool, k: i64) -> Tensor<FP16x16W> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP16x16W>,
updates: Tensor<FP16x16W>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16W> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(
self: @Tensor<FP16x16W>, indices: Tensor<usize>
) -> Tensor<FP16x16W> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP16x16W>, threshold: Option<FP16x16W>) -> Tensor<FP16x16W> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn not(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP16x16W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16W> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP16x16W>, bias: Option<FP16x16W>, lambd: Option<FP16x16W>
) -> Tensor<FP16x16W> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP16x16W>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP16x16W>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP16x16W>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP16x16W> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(
self: @Tensor<FP16x16W>, axis: usize, keepdims: bool
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP16x16W>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP16x16W>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn compress(
self: @Tensor<FP16x16W>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP16x16W> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<FP16x16W>,
scale: @Tensor<FP16x16W>,
B: Option<@Tensor<FP16x16W>>,
axis: Option<i32>,
epsilon: Option<FP16x16W>,
stash_type: Option<usize>,
) -> (Tensor<FP16x16W>, Tensor<FP16x16W>, Tensor<FP16x16W>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP16x16W>,
roi: Option<Tensor<FP16x16W>>,
scales: Option<Span<FP16x16W>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP16x16W>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP16x16W>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<FP16x16W>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16W>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP16x16W>,
high: Option<FP16x16W>,
low: Option<FP16x16W>,
seed: Option<usize>
) -> Tensor<FP16x16W> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP16x16W, end: FP16x16W, step: FP16x16W) -> Tensor<FP16x16W> {
math::range::range(start, end, step)
}
fn hann_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::hann_window::hann_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::hamming_window::hamming_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::blackman_window::blackman_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP16x16W>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16W>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP16x16W>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP16x16W> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP16x16W>) -> Option<Tensor<FP16x16W>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP16x16W>
) -> (Tensor::<u32>, Tensor::<FP16x16W>, Tensor<FP16x16W>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP16x16W>,
updates: Tensor<FP16x16W>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16W> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP16x16W>,
default_list: Option<Span<FP16x16W>>,
default_tensor: Option<Tensor<FP16x16W>>,
keys: Option<Span<FP16x16W>>,
keys_tensor: Option<Tensor<FP16x16W>>,
values: Option<Span<FP16x16W>>,
values_tensor: Option<Tensor<FP16x16W>>
) -> Tensor<FP16x16W> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP16x16W>` using the `Add` trait.
impl FP16x16WTensorAdd of Add<Tensor<FP16x16W>> {
/// Adds two `Tensor<FP16x16W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16W>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP16x16W>` using the `Sub` trait.
impl FP16x16WTensorSub of Sub<Tensor<FP16x16W>> {
/// Subtracts two `Tensor<FP16x16W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16W>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP16x16W>` using the `Mul` trait.
impl FP16x16WTensorMul of Mul<Tensor<FP16x16W>> {
/// Multiplies two `Tensor<FP16x16W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16W>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP16x16W>` using the `Div` trait.
impl FP16x16WTensorDiv of Div<Tensor<FP16x16W>> {
/// Divides two `Tensor<FP16x16W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP16x16W>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equality for two `Tensor<FP16x16W>` using the `PartialEq` trait.
impl FP16x16WTensorPartialEq of PartialEq<Tensor<FP16x16W>> {
fn eq(lhs: @Tensor<FP16x16W>, rhs: @Tensor<FP16x16W>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP16x16W>, rhs: @Tensor<FP16x16W>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
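/// Identity `TryInto<u32, u32>` conversion. It always succeeds; provided so
/// that generic code bounded on a `TryInto` conversion into `u32` also
/// compiles when the source type is already `u32`.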
impl U32TryIntoU32 of TryInto<u32, u32> {
fn try_into(self: u32) -> Option<u32> {
Option::Some(self)
}
}
/// Implements partial ordering for two `Tensor<FP16x16W>` using the `PartialOrd` trait.
impl FP16x16WTensorPartialOrd of PartialOrd<Tensor<FP16x16W>> {
#[inline(always)]
fn ge(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u64 = 589; // 0.009
fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
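// Illustrative sketch (values and the test harness are assumptions, not from
// the original source): PRECISION = 589 is ~0.9% of FP16x16W ONE (65536), so
// values within ~0.9% relative difference compare as approximately equal.
#[test]
fn example_relative_eq_fp16x16w() {
let a: FP16x16W = FixedTrait::new_unscaled(1000, false);
let b: FP16x16W = FixedTrait::new_unscaled(1001, false); // ~0.1% from a
let c: FP16x16W = FixedTrait::new_unscaled(1100, false); // ~10% from a
assert(relative_eq(@a, @b), 'should be approx equal');
assert(!relative_eq(@a, @c), 'should not be equal');
}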
fn tensor_eq(mut lhs: Tensor<FP16x16W>, mut rhs: Tensor<FP16x16W>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp32x32.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP32x32, FP32x32Impl, I8IntoFP32x32};
use orion::numbers::fixed_point::implementations::fp32x32::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
impl FP32x32Tensor of TensorTrait<FP32x32> {
fn new(shape: Span<usize>, data: Span<FP32x32>) -> Tensor<FP32x32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP32x32) -> Tensor<FP32x32> {
constant_of_shape(shape, value)
}
fn add(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::div(@lhs, @rhs)
}
fn at(self: @Tensor<FP32x32>, indices: Span<usize>) -> FP32x32 {
*at_tensor(self, indices)
}
fn min_in_tensor(self: @Tensor<FP32x32>) -> FP32x32 {
math::min_in_tensor::min_in_tensor::<FP32x32, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP32x32>>) -> Tensor<FP32x32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP32x32>) -> FP32x32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP32x32>>) -> Tensor<FP32x32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP32x32>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP32x32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP32x32>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP32x32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP32x32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP32x32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP32x32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP32x32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP32x32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP32x32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP32x32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP32x32> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP32x32>>, axis: usize,) -> Tensor<FP32x32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP32x32>, y_scale: @Tensor<FP32x32>, y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP32x32>, x_zero_point: @Tensor<FP32x32>
) -> Tensor::<FP32x32> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP32x32>>,
zero_points: Span<Tensor<FP32x32>>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP32x32>, a_zero_point: @Tensor<FP32x32>, alpha: FP32x32
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP32x32>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP32x32> {
core_tensor::slice::<FP32x32>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP32x32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP32x32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP32x32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP32x32>, axes: Option<Span<usize>>) -> Tensor<FP32x32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP32x32>, min: Option<FP32x32>, max: Option<FP32x32>) -> Tensor<FP32x32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP32x32>, x: @Tensor<FP32x32>, y: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::round::round(*self)
}
fn trilu(self: @Tensor<FP32x32>, upper: bool, k: i64) -> Tensor<FP32x32> {
linalg::trilu::trilu(self, upper, k)
}
fn reduce_l1(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn scatter(
self: @Tensor<FP32x32>,
updates: Tensor<FP32x32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP32x32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<FP32x32>, indices: Tensor<usize>) -> Tensor<FP32x32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP32x32>, threshold: Option<FP32x32>) -> Tensor<FP32x32> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn not(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP32x32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP32x32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP32x32>, bias: Option<FP32x32>, lambd: Option<FP32x32>
) -> Tensor<FP32x32> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP32x32>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP32x32>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP32x32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP32x32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn erf(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP32x32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP32x32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP32x32>,
scale: @Tensor<FP32x32>,
B: Option<@Tensor<FP32x32>>,
axis: Option<i32>,
epsilon: Option<FP32x32>,
stash_type: Option<usize>,
) -> (Tensor<FP32x32>, Tensor<FP32x32>, Tensor<FP32x32>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP32x32>,
roi: Option<Tensor<FP32x32>>,
scales: Option<Span<FP32x32>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP32x32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP32x32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP32x32> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP32x32>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP32x32> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP32x32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP32x32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP32x32>, high: Option<FP32x32>, low: Option<FP32x32>, seed: Option<usize>
) -> Tensor<FP32x32> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP32x32, end: FP32x32, step: FP32x32) -> Tensor<FP32x32> {
math::range::range(start, end, step)
}
fn hann_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn hamming_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn blackman_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<FP32x32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP32x32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP32x32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP32x32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP32x32>) -> Option<Tensor<FP32x32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP32x32>
) -> (Tensor::<u32>, Tensor::<FP32x32>, Tensor<FP32x32>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP32x32>,
updates: Tensor<FP32x32>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP32x32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP32x32>,
default_list: Option<Span<FP32x32>>,
default_tensor: Option<Tensor<FP32x32>>,
keys: Option<Span<FP32x32>>,
keys_tensor: Option<Tensor<FP32x32>>,
values: Option<Span<FP32x32>>,
values_tensor: Option<Tensor<FP32x32>>
) -> Tensor<FP32x32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP32x32>` using the `Add` trait.
impl FP32x32TensorAdd of Add<Tensor<FP32x32>> {
/// Adds two `Tensor<FP32x32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP32x32>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP32x32>` using the `Sub` trait.
impl FP32x32TensorSub of Sub<Tensor<FP32x32>> {
/// Subtracts two `Tensor<FP32x32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP32x32>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP32x32>` using the `Mul` trait.
impl FP32x32TensorMul of Mul<Tensor<FP32x32>> {
/// Multiplies two `Tensor<FP32x32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP32x32>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP32x32>` using the `Div` trait.
impl FP32x32TensorDiv of Div<Tensor<FP32x32>> {
/// Divides two `Tensor<FP32x32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP32x32>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equality for two `Tensor<FP32x32>` using the `PartialEq` trait.
impl FP32x32TensorPartialEq of PartialEq<Tensor<FP32x32>> {
fn eq(lhs: @Tensor<FP32x32>, rhs: @Tensor<FP32x32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP32x32>, rhs: @Tensor<FP32x32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
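/// Truncating conversion from `FP32x32` to `i8`: the fractional part is
/// dropped and the sign re-applied. An integer part outside the `i8` range
/// panics at the inner `unwrap` rather than returning `Option::None`.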
impl FP32x32TryIntoI8 of TryInto<FP32x32, i8> {
fn try_into(self: FP32x32) -> Option<i8> {
let number_felt: felt252 = (self.mag / ONE).into();
let number_i8: i8 = number_felt.try_into().unwrap();
if self.sign {
return Option::Some(number_i8 * -1_i8);
}
Option::Some(number_i8)
}
}
impl TensorI8IntoTensorFP32x32 of Into<Tensor<i8>, Tensor<FP32x32>> {
fn into(self: Tensor<i8>) -> Tensor<FP32x32> {
tensor_i8_to_tensor_fp32x32(@self)
}
}
/// Implements partial ordering for two `Tensor<FP32x32>` using the `PartialOrd` trait.
impl FP32x32TensorPartialOrd of PartialOrd<Tensor<FP32x32>> {
#[inline(always)]
fn ge(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u64 = 75497; // ~1.76e-5 (75497 / 2^32)
fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP32x32>, mut rhs: Tensor<FP32x32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp32x32(x: @Tensor<i8>) -> Tensor<FP32x32> {
let mut result_data = ArrayTrait::<FP32x32>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp64x64.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP64x64, FP64x64Impl, I8IntoFP64x64};
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
impl FP64x64Tensor of TensorTrait<FP64x64> {
fn new(shape: Span<usize>, data: Span<FP64x64>) -> Tensor<FP64x64> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP64x64) -> Tensor<FP64x64> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP64x64>, indices: Span<usize>) -> FP64x64 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP64x64>) -> FP64x64 {
math::min_in_tensor::min_in_tensor::<FP64x64, u128>(*self.data)
}
fn min(tensors: Span<Tensor<FP64x64>>) -> Tensor<FP64x64> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP64x64>) -> FP64x64 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP64x64>>) -> Tensor<FP64x64> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP64x64>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP64x64>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP64x64>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP64x64>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP64x64> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP64x64>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP64x64>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP64x64>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP64x64>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP64x64> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP64x64>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP64x64> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP64x64>>, axis: usize,) -> Tensor<FP64x64> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP64x64>, y_scale: @Tensor<FP64x64>, y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP64x64>, x_zero_point: @Tensor<FP64x64>
) -> Tensor::<FP64x64> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP64x64>>,
zero_points: Span<Tensor<FP64x64>>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP64x64>, a_zero_point: @Tensor<FP64x64>, alpha: FP64x64
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP64x64>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP64x64> {
core_tensor::slice::<FP64x64>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP64x64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP64x64> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP64x64>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP64x64>, axes: Option<Span<usize>>) -> Tensor<FP64x64> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP64x64>, min: Option<FP64x64>, max: Option<FP64x64>) -> Tensor<FP64x64> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP64x64>, x: @Tensor<FP64x64>, y: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP64x64>, indices: Tensor<usize>) -> Tensor<FP64x64> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP64x64>, threshold: Option<FP64x64>) -> Tensor<FP64x64> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP64x64>, upper: bool, k: i64) -> Tensor<FP64x64> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP64x64>,
updates: Tensor<FP64x64>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP64x64> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn not(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP64x64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP64x64> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP64x64>, bias: Option<FP64x64>, lambd: Option<FP64x64>
) -> Tensor<FP64x64> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP64x64>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP64x64>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP64x64>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP64x64> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn erf(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP64x64>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP64x64>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP64x64>,
scale: @Tensor<FP64x64>,
B: Option<@Tensor<FP64x64>>,
axis: Option<i32>,
epsilon: Option<FP64x64>,
stash_type: Option<usize>,
) -> (Tensor<FP64x64>, Tensor<FP64x64>, Tensor<FP64x64>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP64x64>,
roi: Option<Tensor<FP64x64>>,
scales: Option<Span<FP64x64>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP64x64>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP64x64>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP64x64> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP64x64>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP64x64> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP64x64>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP64x64>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP64x64>, high: Option<FP64x64>, low: Option<FP64x64>, seed: Option<usize>
) -> Tensor<FP64x64> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP64x64, end: FP64x64, step: FP64x64) -> Tensor<FP64x64> {
math::range::range(start, end, step)
}
fn hann_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn hamming_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn blackman_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<FP64x64>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP64x64>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP64x64>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP64x64> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP64x64>) -> Option<Tensor<FP64x64>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP64x64>
) -> (Tensor::<u32>, Tensor::<FP64x64>, Tensor<FP64x64>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP64x64>,
updates: Tensor<FP64x64>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP64x64> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP64x64>,
default_list: Option<Span<FP64x64>>,
default_tensor: Option<Tensor<FP64x64>>,
keys: Option<Span<FP64x64>>,
keys_tensor: Option<Tensor<FP64x64>>,
values: Option<Span<FP64x64>>,
values_tensor: Option<Tensor<FP64x64>>
) -> Tensor<FP64x64> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP64x64>` using the `Add` trait.
impl FP64x64TensorAdd of Add<Tensor<FP64x64>> {
/// Adds two `Tensor<FP64x64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP64x64>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP64x64>` using the `Sub` trait.
impl FP64x64TensorSub of Sub<Tensor<FP64x64>> {
/// Subtracts two `Tensor<FP64x64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP64x64>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP64x64>` using the `Mul` trait.
impl FP64x64TensorMul of Mul<Tensor<FP64x64>> {
/// Multiplies two `Tensor<FP64x64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP64x64>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP64x64>` using the `Div` trait.
impl FP64x64TensorDiv of Div<Tensor<FP64x64>> {
/// Divides two `Tensor<FP64x64>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP64x64>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::div(@lhs, @rhs)
}
}
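// A minimal usage sketch for the four operator overloads above (the values
// and shape are illustrative; `FixedTrait::new_unscaled` is this crate's
// fixed-point constructor):
//
// let a = TensorTrait::<FP64x64>::new(
//     array![2].span(),
//     array![FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false)].span()
// );
// let b = TensorTrait::<FP64x64>::new(
//     array![2].span(),
//     array![FixedTrait::new_unscaled(3, false), FixedTrait::new_unscaled(4, false)].span()
// );
// let sum = a + b;  // element-wise: [4, 6]
// let prod = a * b; // element-wise: [3, 8]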
/// Implements partial equality for two `Tensor<FP64x64>` instances using the `PartialEq` trait.
impl FP64x64TensorPartialEq of PartialEq<Tensor<FP64x64>> {
fn eq(lhs: @Tensor<FP64x64>, rhs: @Tensor<FP64x64>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP64x64>, rhs: @Tensor<FP64x64>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
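// Tensor equality is approximate: `==` delegates to `tensor_eq` (see the
// Internals section below), which compares shapes exactly and data
// element-wise through `relative_eq`. A sketch, with illustrative tensors:
//
// let x = TensorTrait::<FP64x64>::new(shape, data);
// let y = TensorTrait::<FP64x64>::new(shape, data);
// assert(x == y, 'tensors should be equal');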
impl FP64x64TryIntoI8 of TryInto<FP64x64, i8> {
fn try_into(self: FP64x64) -> Option<i8> {
let number_felt: felt252 = (self.mag / ONE).into();
let number_i8: i8 = number_felt.try_into().unwrap();
if self.sign {
return Option::Some(number_i8 * -1_i8);
}
Option::Some(number_i8)
}
}
impl TensorI8IntoTensorFP64x64 of Into<Tensor<i8>, Tensor<FP64x64>> {
fn into(self: Tensor<i8>) -> Tensor<FP64x64> {
tensor_i8_to_tensor_fp64x64(@self)
}
}
/// Implements partial ordering for two `Tensor<FP64x64>` instances using the `PartialOrd` trait.
impl FP64x64TensorPartialOrd of PartialOrd<Tensor<FP64x64>> {
#[inline(always)]
fn ge(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u128 = 1660000000000000; // 9e-05
fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
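// Worked illustration (the numbers are derived from the constants above, not
// test vectors): with ONE = 2^64 for FP64x64, PRECISION / ONE is roughly
// 1.66e15 / 1.84e19 ~ 9e-5, so two elements compare equal when they differ by
// less than about 0.009% of the left-hand value. When `lhs` is zero, the
// comparison falls back to the absolute difference `diff.mag`.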
fn tensor_eq(mut lhs: Tensor<FP64x64>, mut rhs: Tensor<FP64x64>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp64x64(x: @Tensor<i8>) -> Tensor<FP64x64> {
let mut result_data = ArrayTrait::<FP64x64>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp8x23.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_ops, ml, manipulation};
use orion::numbers::{NumberTrait, FP8x23, I8IntoFP8x23};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp8x23::math::trig::PI;
impl FP8x23Tensor of TensorTrait<FP8x23> {
fn new(shape: Span<usize>, data: Span<FP8x23>) -> Tensor<FP8x23> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP8x23) -> Tensor<FP8x23> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP8x23>, indices: Span<usize>) -> FP8x23 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP8x23>) -> FP8x23 {
math::min_in_tensor::min_in_tensor::<FP8x23, u32>(*self.data)
}
fn min(tensors: Span<Tensor<FP8x23>>) -> Tensor<FP8x23> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP8x23>) -> FP8x23 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP8x23>>) -> Tensor<FP8x23> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP8x23>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP8x23>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP8x23>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP8x23>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP8x23> {
reshape(self, target_shape, allowzero)
}
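    // Note on `reshape` semantics (presumably mirroring ONNX Reshape, which
    // the shared `reshape` helper from `core` implements): a target dimension
    // of -1 is inferred from the element count, and a 0 copies the
    // corresponding input dimension unless `allowzero` is true. Illustrative
    // sketch:
    //
    // let t = TensorTrait::<FP8x23>::new(array![2, 3].span(), data);
    // let r = t.reshape(array![3, -1].span(), false); // shape becomes [3, 2]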
fn reduce_sum(
self: @Tensor<FP8x23>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP8x23>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP8x23>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP8x23>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP8x23> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP8x23>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP8x23> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP8x23>>, axis: usize,) -> Tensor<FP8x23> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP8x23>, y_scale: @Tensor<FP8x23>, y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP8x23>, x_zero_point: @Tensor<FP8x23>
) -> Tensor::<FP8x23> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP8x23>>,
zero_points: Span<Tensor<FP8x23>>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP8x23>, a_zero_point: @Tensor<FP8x23>, alpha: FP8x23
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP8x23>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP8x23> {
core_ops::slice::<FP8x23>(self, starts, ends, axes, steps)
}
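    // A hedged usage sketch for `slice` (indices illustrative; following the
    // ONNX Slice convention, `ends` should be exclusive). Take the first two
    // rows and every other column of a 2-D tensor:
    //
    // let s = t.slice(
    //     array![0, 0].span(),                  // starts
    //     array![2, 4].span(),                  // ends
    //     Option::None,                         // axes: defaults to all axes
    //     Option::Some(array![1, 2].span())     // steps
    // );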
fn gather(self: @Tensor<FP8x23>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<FP8x23> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP8x23>) -> Tensor<usize> {
core_ops::nonzero(self)
}
fn squeeze(self: @Tensor<FP8x23>, axes: Option<Span<usize>>) -> Tensor<FP8x23> {
core_ops::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
core_ops::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP8x23>, min: Option<FP8x23>, max: Option<FP8x23>) -> Tensor<FP8x23> {
core_ops::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
core_ops::identity(self)
}
fn where(self: @Tensor<FP8x23>, x: @Tensor<FP8x23>, y: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP8x23>, upper: bool, k: i64) -> Tensor<FP8x23> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP8x23>,
updates: Tensor<FP8x23>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn reduce_sum_square(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn gather_elements(
self: @Tensor<FP8x23>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<FP8x23>, bias: Option<FP8x23>, lambd: Option<FP8x23>) -> Tensor<FP8x23> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn binarizer(self: @Tensor<FP8x23>, threshold: Option<FP8x23>) -> Tensor<FP8x23> {
math::binarizer::binarizer(*self, threshold)
}
fn array_feature_extractor(self: @Tensor<FP8x23>, indices: Tensor<usize>) -> Tensor<FP8x23> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn not(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
panic(array!['not supported!'])
}
fn reduce_min(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP8x23>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP8x23>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP8x23>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP8x23> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP8x23>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP8x23>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP8x23>,
scale: @Tensor<FP8x23>,
B: Option<@Tensor<FP8x23>>,
axis: Option<i32>,
epsilon: Option<FP8x23>,
stash_type: Option<usize>,
) -> (Tensor<FP8x23>, Tensor<FP8x23>, Tensor<FP8x23>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP8x23>,
roi: Option<Tensor<FP8x23>>,
scales: Option<Span<FP8x23>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP8x23>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP8x23>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP8x23> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP8x23>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP8x23> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP8x23>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP8x23>, high: Option<FP8x23>, low: Option<FP8x23>, seed: Option<usize>
) -> Tensor<FP8x23> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP8x23, end: FP8x23, step: FP8x23) -> Tensor<FP8x23> {
math::range::range(start, end, step)
}
fn hann_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::hann_window::hann_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::hamming_window::hamming_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::blackman_window::blackman_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
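    // The three window generators above differ only in their coefficient
    // kernels; each forwards the same fixed-point PI constant. A minimal
    // sketch (window size and periodic flag are illustrative):
    //
    // let w = TensorTrait::<FP8x23>::hann_window(
    //     FixedTrait::new_unscaled(8, false), Option::Some(1)
    // );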
fn split_to_sequence(
self: @Tensor<FP8x23>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP8x23>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP8x23> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP8x23>) -> Option<Tensor<FP8x23>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP8x23>
) -> (Tensor::<u32>, Tensor::<FP8x23>, Tensor<FP8x23>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP8x23>,
updates: Tensor<FP8x23>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP8x23>,
default_list: Option<Span<FP8x23>>,
default_tensor: Option<Tensor<FP8x23>>,
keys: Option<Span<FP8x23>>,
keys_tensor: Option<Tensor<FP8x23>>,
values: Option<Span<FP8x23>>,
values_tensor: Option<Tensor<FP8x23>>
) -> Tensor<FP8x23> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP8x23>` using the `Add` trait.
impl FP8x23TensorAdd<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TAdd: Add<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Add<Tensor<FP8x23>> {
/// Adds two `Tensor<FP8x23>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP8x23>` using the `Sub` trait.
impl FP8x23TensorSub<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TSub: Sub<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Sub<Tensor<FP8x23>> {
/// Subtracts two `Tensor<FP8x23>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP8x23>` using the `Mul` trait.
impl FP8x23TensorMul<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TMul: Mul<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Mul<Tensor<FP8x23>> {
/// Multiplies two `Tensor<FP8x23>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP8x23>` using the `Div` trait.
impl FP8x23TensorDiv<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TDiv: Div<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Div<Tensor<FP8x23>> {
/// Divides two `Tensor<FP8x23>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equality for two `Tensor<FP8x23>` instances using the `PartialEq` trait.
impl FP8x23TensorPartialEq of PartialEq<Tensor<FP8x23>> {
fn eq(lhs: @Tensor<FP8x23>, rhs: @Tensor<FP8x23>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP8x23>, rhs: @Tensor<FP8x23>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl TensorI8IntoTensorFP8x23 of Into<Tensor<i8>, Tensor<FP8x23>> {
fn into(self: Tensor<i8>) -> Tensor<FP8x23> {
tensor_i8_to_tensor_fp8x23(@self)
}
}
/// Implements partial ordering for two `Tensor<FP8x23>` instances using the `PartialOrd` trait.
impl FP8x23TensorPartialOrd of PartialOrd<Tensor<FP8x23>> {
#[inline(always)]
fn ge(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u32 = 75497; // 0.009
fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP8x23>, mut rhs: Tensor<FP8x23>,) -> bool {
let mut is_eq = true;
    while lhs.shape.len() != 0 && is_eq {
        is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
    };
    if !is_eq {
        return false;
    }
    while lhs.data.len() != 0 && is_eq {
        is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
    };
is_eq
}
fn tensor_i8_to_tensor_fp8x23(x: @Tensor<i8>) -> Tensor<FP8x23> {
let mut result_data = ArrayTrait::<FP8x23>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_fp8x23wide.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP8x23W};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
impl FP8x23WTensor of TensorTrait<FP8x23W> {
fn new(shape: Span<usize>, data: Span<FP8x23W>) -> Tensor<FP8x23W> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP8x23W) -> Tensor<FP8x23W> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP8x23W>, indices: Span<usize>) -> FP8x23W {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP8x23W>) -> FP8x23W {
math::min_in_tensor::min_in_tensor::<FP8x23W, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP8x23W>>) -> Tensor<FP8x23W> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP8x23W>) -> FP8x23W {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP8x23W>>) -> Tensor<FP8x23W> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP8x23W>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP8x23W>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP8x23W>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP8x23W>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP8x23W> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP8x23W>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP8x23W>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP8x23W>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP8x23W>, axes: Span<usize>) -> Tensor<FP8x23W> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP8x23W>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP8x23W> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP8x23W>, axis: usize) -> Tensor<FP8x23W> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP8x23W>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP8x23W>>, axis: usize,) -> Tensor<FP8x23W> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP8x23W>, y_scale: @Tensor<FP8x23W>, y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP8x23W>, x_zero_point: @Tensor<FP8x23W>
) -> Tensor::<FP8x23W> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP8x23W>>,
zero_points: Span<Tensor<FP8x23W>>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP8x23W>, a_zero_point: @Tensor<FP8x23W>, alpha: FP8x23W
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<FP8x23W>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP8x23W> {
core_tensor::slice::<FP8x23W>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP8x23W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23W> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP8x23W>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP8x23W>, axes: Option<Span<usize>>) -> Tensor<FP8x23W> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP8x23W>, axes: Span<usize>) -> Tensor<FP8x23W> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP8x23W>, min: Option<FP8x23W>, max: Option<FP8x23W>) -> Tensor<FP8x23W> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP8x23W>, x: @Tensor<FP8x23W>, y: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP8x23W>, indices: Tensor<usize>) -> Tensor<FP8x23W> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP8x23W>, threshold: Option<FP8x23W>) -> Tensor<FP8x23W> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP8x23W>, upper: bool, k: i64) -> Tensor<FP8x23W> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP8x23W>,
updates: Tensor<FP8x23W>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23W> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn not(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP8x23W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23W> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP8x23W>, bias: Option<FP8x23W>, lambd: Option<FP8x23W>
) -> Tensor<FP8x23W> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP8x23W>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP8x23W>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP8x23W>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP8x23W> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP8x23W>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP8x23W>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP8x23W>,
scale: @Tensor<FP8x23W>,
B: Option<@Tensor<FP8x23W>>,
axis: Option<i32>,
epsilon: Option<FP8x23W>,
stash_type: Option<usize>,
) -> (Tensor<FP8x23W>, Tensor<FP8x23W>, Tensor<FP8x23W>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP8x23W>,
roi: Option<Tensor<FP8x23W>>,
scales: Option<Span<FP8x23W>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP8x23W>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP8x23W>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn compress(
self: @Tensor<FP8x23W>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP8x23W> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP8x23W>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23W>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP8x23W>, high: Option<FP8x23W>, low: Option<FP8x23W>, seed: Option<usize>
) -> Tensor<FP8x23W> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP8x23W, end: FP8x23W, step: FP8x23W) -> Tensor<FP8x23W> {
math::range::range(start, end, step)
}
fn hann_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
math::hann_window::hann_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
math::hamming_window::hamming_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
math::blackman_window::blackman_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP8x23W>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23W>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP8x23W>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP8x23W> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP8x23W>) -> Option<Tensor<FP8x23W>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP8x23W>
) -> (Tensor::<u32>, Tensor::<FP8x23W>, Tensor<FP8x23W>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP8x23W>,
updates: Tensor<FP8x23W>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23W> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP8x23W>,
default_list: Option<Span<FP8x23W>>,
default_tensor: Option<Tensor<FP8x23W>>,
keys: Option<Span<FP8x23W>>,
keys_tensor: Option<Tensor<FP8x23W>>,
values: Option<Span<FP8x23W>>,
values_tensor: Option<Tensor<FP8x23W>>
) -> Tensor<FP8x23W> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<FP8x23W>` using the `Add` trait.
impl FP8x23WTensorAdd<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TAdd: Add<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Add<Tensor<FP8x23W>> {
/// Adds two `Tensor<FP8x23W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23W>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<FP8x23W>` using the `Sub` trait.
impl FP8x23WTensorSub<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TSub: Sub<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Sub<Tensor<FP8x23W>> {
/// Subtracts two `Tensor<FP8x23W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23W>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<FP8x23W>` using the `Mul` trait.
impl FP8x23WTensorMul<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TMul: Mul<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Mul<Tensor<FP8x23W>> {
/// Multiplies two `Tensor<FP8x23W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23W>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<FP8x23W>` using the `Div` trait.
impl FP8x23WTensorDiv<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TDiv: Div<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Div<Tensor<FP8x23W>> {
/// Divides two `Tensor<FP8x23W>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<FP8x23W>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equality for two `Tensor<FP8x23W>` instances using the `PartialEq` trait.
impl FP8x23WTensorPartialEq of PartialEq<Tensor<FP8x23W>> {
fn eq(lhs: @Tensor<FP8x23W>, rhs: @Tensor<FP8x23W>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP8x23W>, rhs: @Tensor<FP8x23W>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
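// Identity conversion on the wide magnitude type. Presumably present so that
// generic bounds of the form `TryInto<MAG, u64>` (used by kernels such as
// `min_in_tensor::<FP8x23W, u64>` above) remain satisfiable when MAG is
// already u64.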
impl U64TryIntoU64 of TryInto<u64, u64> {
    fn try_into(self: u64) -> Option<u64> {
        Option::Some(self)
    }
}
/// Implements partial ordering for two `Tensor<FP8x23W>` instances using the `PartialOrd` trait.
impl FP8x23WTensorPartialOrd of PartialOrd<Tensor<FP8x23W>> {
#[inline(always)]
fn ge(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
const PRECISION: u64 = 75497; // 0.009
fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP8x23W>, mut rhs: Tensor<FP8x23W>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_i32.cairo | use orion::numbers::{I32Div, I32DivEq};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{
tensor_u32::U32Tensor, tensor_i8::I8Tensor, tensor_bool::BoolTensor
};
impl I32Tensor of TensorTrait<i32> {
fn new(shape: Span<usize>, data: Span<i32>) -> Tensor<i32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: i32) -> Tensor<i32> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<i32>, indices: Span<usize>) -> i32 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<i32>) -> i32 {
math::min_in_tensor::min_in_tensor::<i32>(*self.data)
}
fn min(tensors: Span<Tensor<i32>>) -> Tensor<i32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<i32>) -> i32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<i32>>) -> Tensor<i32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<i32>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<i32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<i32>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<i32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<i32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<i32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<i32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<i32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<i32>, axes: Span<usize>) -> Tensor<i32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<i32>) -> Tensor<i32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<i32>) -> Tensor<i32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<i32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<i32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<i32>, axis: usize) -> Tensor<i32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<i32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<i32>>, axis: usize,) -> Tensor<i32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<i32>, y_scale: @Tensor<i32>, y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127)
}
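    // Note: this i32 implementation clamps quantized values to the symmetric
    // range [-127, 127] (passed as plain literals above), whereas the
    // fixed-point implementations pass NumberTrait::new_unscaled(128, true)
    // for a lower bound of -128.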
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<i32>, x_zero_point: @Tensor<i32>
) -> Tensor::<i32> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<i32>>,
zero_points: Span<Tensor<i32>>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<i32>, a_zero_point: @Tensor<i32>, alpha: i32
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<i32>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<i32> {
core_tensor::slice::<i32>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<i32>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<i32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<i32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<i32>, axes: Option<Span<usize>>) -> Tensor<i32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<i32>, axes: Span<usize>) -> Tensor<i32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<i32>) -> Tensor<i32> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<i32>, min: Option<i32>, max: Option<i32>) -> Tensor<i32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<i32>) -> Tensor<i32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<i32>, x: @Tensor<i32>, y: @Tensor<i32>) -> Tensor<i32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<i32>) -> Tensor<i32> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<i32>, upper: bool, k: i64) -> Tensor<i32> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<i32>,
updates: Tensor<i32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<i32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<i32>, indices: Tensor<usize>) -> Tensor<i32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<i32>, threshold: Option<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<i32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<i32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<i32>, bias: Option<i32>, lambd: Option<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<i32>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<i32>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<i32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<i32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<i32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<i32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<i32>,
roi: Option<Tensor<i32>>,
scales: Option<Span<i32>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<i32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<i32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<i32>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<i32> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<i32>,
scale: @Tensor<i32>,
B: Option<@Tensor<i32>>,
axis: Option<i32>,
epsilon: Option<i32>,
stash_type: Option<usize>,
) -> (Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<i32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<i32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<i32>, high: Option<i32>, low: Option<i32>, seed: Option<usize>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn range(start: i32, end: i32, step: i32) -> Tensor<i32> {
math::range::range(start, end, step)
}
fn hann_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn hamming_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn blackman_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<i32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<i32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<i32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<i32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<i32>) -> Option<Tensor<i32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<i32>) -> (Tensor::<u32>, Tensor::<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<i32>, updates: Tensor<i32>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<i32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<i32>,
default_list: Option<Span<i32>>,
default_tensor: Option<Tensor<i32>>,
keys: Option<Span<i32>>,
keys_tensor: Option<Tensor<i32>>,
values: Option<Span<i32>>,
values_tensor: Option<Tensor<i32>>
) -> Tensor<i32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<i32>` using the `Add` trait.
impl I32TensorAdd of Add<Tensor<i32>> {
/// Adds two `Tensor<i32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i32>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<i32>` using the `Sub` trait.
impl I32TensorSub of Sub<Tensor<i32>> {
/// Subtracts two `Tensor<i32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i32>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<i32>` using the `Mul` trait.
impl I32TensorMul of Mul<Tensor<i32>> {
/// Multiplies two `Tensor<i32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i32>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<i32>` using the `Div` trait.
impl I32TensorDiv of Div<Tensor<i32>> {
/// Divides two `Tensor<i32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i32>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equal for two `Tensor<i32>` using the `PartialEq` trait.
impl I32TensorPartialEq of PartialEq<Tensor<i32>> {
fn eq(lhs: @Tensor<i32>, rhs: @Tensor<i32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<i32>, rhs: @Tensor<i32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl I32TryIntoI32 of TryInto<i32, i32> {
fn try_into(self: i32) -> Option<i32> {
Option::Some(self)
}
}
impl TensorI8IntoTensorI32 of Into<Tensor<i8>, Tensor<i32>> {
fn into(self: Tensor<i8>) -> Tensor<i32> {
tensor_i8_to_tensor_i32(@self)
}
}
/// Implements partial ord for two `Tensor<i32>` using `PartialOrd` trait.
impl I32TensorPartialOrd of PartialOrd<Tensor<i32>> {
#[inline(always)]
fn ge(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
fn tensor_eq(mut lhs: Tensor<i32>, mut rhs: Tensor<i32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
}
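// A minimal sketch of `tensor_eq` behavior (hypothetical values, not from the
// library's test suite): shapes are compared first, then data, so two tensors
// with identical data but different shapes are not equal.
//
//     let a = TensorTrait::<i32>::new(array![2, 2].span(), array![1, 2, 3, 4].span());
//     let b = TensorTrait::<i32>::new(array![2, 2].span(), array![1, 2, 3, 4].span());
//     let c = TensorTrait::<i32>::new(array![4].span(), array![1, 2, 3, 4].span());
//     // a == b  -> true  (same shape, same data)
//     // a == c  -> false (shape [2, 2] vs [4])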
fn tensor_i8_to_tensor_i32(x: @Tensor<i8>) -> Tensor<i32> {
let mut result_data = ArrayTrait::<i32>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
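// Usage sketch for the widening conversion above (hypothetical values): the
// `TensorI8IntoTensorI32` impl converts an i8 tensor element-wise.
//
//     let t8 = TensorTrait::<i8>::new(array![3].span(), array![1, -2, 3].span());
//     let t32: Tensor<i32> = t8.into(); // shape [3], data [1, -2, 3] as i32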
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_i8.cairo | use orion::numbers::{I8Div, I8DivEq};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{tensor_u32::U32Tensor, tensor_bool::BoolTensor};
impl I8Tensor of TensorTrait<i8> {
fn new(shape: Span<usize>, data: Span<i8>) -> Tensor<i8> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: i8) -> Tensor<i8> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<i8>, indices: Span<usize>) -> i8 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<i8>) -> i8 {
math::min_in_tensor::min_in_tensor::<i8>(*self.data)
}
fn min(tensors: Span<Tensor<i8>>) -> Tensor<i8> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<i8>) -> i8 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<i8>>) -> Tensor<i8> {
math::max::max(tensors)
}
fn stride(self: @Tensor<i8>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<i8>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<i8>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<i8>, target_shape: Span<i32>, allowzero: bool) -> Tensor<i8> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<i8>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<i8>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<i8>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<i8>, axes: Span<usize>) -> Tensor<i8> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<i8>) -> Tensor<i8> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<i8>) -> Tensor<i8> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<i8>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<i8> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<i8>, axis: usize) -> Tensor<i8> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<i8>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<i8>>, axis: usize,) -> Tensor<i8> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<i8>, y_scale: @Tensor<i8>, y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<i8>, x_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<i8>>,
zero_points: Span<Tensor<i8>>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<i8>, a_zero_point: @Tensor<i8>, alpha: i8
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<i8>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<i8> {
core_tensor::slice::<i8>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<i8>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<i8> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<i8>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<i8>, axes: Option<Span<usize>>) -> Tensor<i8> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<i8>, axes: Span<usize>) -> Tensor<i8> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<i8>) -> Tensor<i8> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<i8>, min: Option<i8>, max: Option<i8>) -> Tensor<i8> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<i8>) -> Tensor<i8> {
core_tensor::identity(self)
}
fn where(self: @Tensor<i8>, x: @Tensor<i8>, y: @Tensor<i8>) -> Tensor<i8> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<i8>) -> Tensor<i8> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<i8>, upper: bool, k: i64) -> Tensor<i8> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<i8>,
updates: Tensor<i8>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<i8> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<i8>, indices: Tensor<usize>) -> Tensor<i8> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<i8>, threshold: Option<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<i8>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<i8> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<i8>, bias: Option<i8>, lambd: Option<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<i8>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<i8>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<i8>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<i8> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<i8>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<i8>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<i8>,
roi: Option<Tensor<i8>>,
scales: Option<Span<i8>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<i8>,
exclude_outside: Option<bool>,
extrapolation_value: Option<i8>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<i8>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<i8> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<i8>,
scale: @Tensor<i8>,
B: Option<@Tensor<i8>>,
axis: Option<i32>,
epsilon: Option<i8>,
stash_type: Option<usize>,
) -> (Tensor<i8>, Tensor<i8>, Tensor<i8>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<i8>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<i8>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<i8>, high: Option<i8>, low: Option<i8>, seed: Option<usize>
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn range(start: i8, end: i8, step: i8) -> Tensor<i8> {
math::range::range(start, end, step)
}
fn hann_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn hamming_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn blackman_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<i8>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<i8>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<i8>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<i8> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<i8>) -> Option<Tensor<i8>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<i8>) -> (Tensor::<u32>, Tensor::<i8>, Tensor<i8>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<i8>, updates: Tensor<i8>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<i8> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<i8>,
default_list: Option<Span<i8>>,
default_tensor: Option<Tensor<i8>>,
keys: Option<Span<i8>>,
keys_tensor: Option<Tensor<i8>>,
values: Option<Span<i8>>,
values_tensor: Option<Tensor<i8>>
) -> Tensor<i8> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<i8>` using the `Add` trait.
impl I8TensorAdd of Add<Tensor<i8>> {
/// Adds two `Tensor<i8>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i8>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<i8>` using the `Sub` trait.
impl I8TensorSub of Sub<Tensor<i8>> {
/// Subtracts two `Tensor<i8>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i8>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<i8>` using the `Mul` trait.
impl I8TensorMul of Mul<Tensor<i8>> {
/// Multiplies two `Tensor<i8>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i8>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<i8>` using the `Div` trait.
impl I8TensorDiv of Div<Tensor<i8>> {
/// Divides two `Tensor<i8>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<i8>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equal for two `Tensor<i8>` using the `PartialEq` trait.
impl I8TensorPartialEq of PartialEq<Tensor<i8>> {
fn eq(lhs: @Tensor<i8>, rhs: @Tensor<i8>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<i8>, rhs: @Tensor<i8>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
/// Implements partial ord for two `Tensor<i8>` using `PartialOrd` trait.
impl I8TensorPartialOrd of PartialOrd<Tensor<i8>> {
#[inline(always)]
fn ge(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
fn tensor_eq(mut lhs: Tensor<i8>, mut rhs: Tensor<i8>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/implementations/tensor_u32.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_bool::BoolTensor};
impl U32Tensor of TensorTrait<u32> {
fn new(shape: Span<usize>, data: Span<u32>) -> Tensor<u32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: u32) -> Tensor<u32> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<u32>, indices: Span<usize>) -> u32 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<u32>) -> u32 {
math::min_in_tensor::min_in_tensor::<u32, u32>(*self.data)
}
fn min(tensors: Span<Tensor<u32>>) -> Tensor<u32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<u32>) -> u32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<u32>>) -> Tensor<u32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<u32>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<u32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<u32>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<u32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<u32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<u32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<u32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<u32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<u32>, axes: Span<usize>) -> Tensor<u32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<u32>) -> Tensor<u32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<u32>) -> Tensor<u32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<u32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<u32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<u32>, axis: usize) -> Tensor<u32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<u32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<u32>>, axis: usize,) -> Tensor<u32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<u32>, y_scale: @Tensor<u32>, y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<u32>, x_zero_point: @Tensor<u32>
) -> Tensor::<u32> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<u32>>,
zero_points: Span<Tensor<u32>>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>,
axis: usize,
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<u32>, a_zero_point: @Tensor<u32>, alpha: u32
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<u32>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<u32> {
core_tensor::slice::<u32>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<u32>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<u32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<u32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<u32>, axes: Option<Span<usize>>) -> Tensor<u32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<u32>, axes: Span<usize>) -> Tensor<u32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn clip(self: @Tensor<u32>, min: Option<u32>, max: Option<u32>) -> Tensor<u32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<u32>) -> Tensor<u32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<u32>, x: @Tensor<u32>, y: @Tensor<u32>) -> Tensor<u32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<u32>) -> Tensor<u32> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<u32>, upper: bool, k: i64) -> Tensor<u32> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<u32>,
updates: Tensor<u32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<u32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<u32>, indices: Tensor<usize>) -> Tensor<u32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<u32>, threshold: Option<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<u32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<u32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<u32>, bias: Option<u32>, lambd: Option<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<u32>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<u32>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<u32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<u32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<u32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<u32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<u32>,
roi: Option<Tensor<u32>>,
scales: Option<Span<u32>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<u32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<u32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<u32>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<u32> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<u32>,
scale: @Tensor<u32>,
B: Option<@Tensor<u32>>,
axis: Option<i32>,
epsilon: Option<u32>,
stash_type: Option<usize>,
) -> (Tensor<u32>, Tensor<u32>, Tensor<u32>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<u32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<u32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<u32>, high: Option<u32>, low: Option<u32>, seed: Option<usize>
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn range(start: u32, end: u32, step: u32) -> Tensor<u32> {
math::range::range(start, end, step)
}
fn hann_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn hamming_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn blackman_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<u32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<u32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<u32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<u32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<u32>) -> Option<Tensor<u32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<u32>) -> (Tensor::<u32>, Tensor::<u32>, Tensor<u32>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<u32>, updates: Tensor<u32>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<u32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<u32>,
default_list: Option<Span<u32>>,
default_tensor: Option<Tensor<u32>>,
keys: Option<Span<u32>>,
keys_tensor: Option<Tensor<u32>>,
values: Option<Span<u32>>,
values_tensor: Option<Tensor<u32>>
) -> Tensor<u32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
/// Implements addition for `Tensor<u32>` using the `Add` trait.
impl U32TensorAdd of Add<Tensor<u32>> {
/// Adds two `Tensor<u32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<u32>` instance representing the result of the element-wise addition.
fn add(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::add(@lhs, @rhs)
}
}
/// Implements subtraction for `Tensor<u32>` using the `Sub` trait.
impl U32TensorSub of Sub<Tensor<u32>> {
/// Subtracts two `Tensor<u32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<u32>` instance representing the result of the element-wise subtraction.
fn sub(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
/// Implements multiplication for `Tensor<u32>` using the `Mul` trait.
impl U32TensorMul of Mul<Tensor<u32>> {
/// Multiplies two `Tensor<u32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<u32>` instance representing the result of the element-wise multiplication.
fn mul(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
/// Implements division for `Tensor<u32>` using the `Div` trait.
impl U32TensorDiv of Div<Tensor<u32>> {
/// Divides two `Tensor<u32>` instances element-wise.
///
/// # Arguments
/// * `lhs` - The first tensor.
/// * `rhs` - The second tensor.
///
/// # Returns
/// * A `Tensor<u32>` instance representing the result of the element-wise division.
fn div(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::div(@lhs, @rhs)
}
}
/// Implements partial equal for two `Tensor<u32>` using the `PartialEq` trait.
impl U32TensorPartialEq of PartialEq<Tensor<u32>> {
fn eq(lhs: @Tensor<u32>, rhs: @Tensor<u32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<u32>, rhs: @Tensor<u32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl U32TryIntoI8 of TryInto<u32, i8> {
fn try_into(self: u32) -> Option<i8> {
let number_felt: felt252 = self.into();
// Return None when the value does not fit in an i8, instead of panicking.
number_felt.try_into()
}
}
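// Usage sketch (hypothetical values): with the propagating conversion above,
// out-of-range values yield Option::None rather than a panic.
//
//     let a: Option<i8> = 100_u32.try_into(); // Option::Some(100)
//     let b: Option<i8> = 200_u32.try_into(); // Option::None (does not fit in i8)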
/// Implements partial ord for two `Tensor<u32>` using `PartialOrd` trait.
impl U32TensorPartialOrd of PartialOrd<Tensor<u32>> {
#[inline(always)]
fn ge(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
#[inline(always)]
fn gt(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
#[inline(always)]
fn le(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
#[inline(always)]
fn lt(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
// Internals
fn tensor_eq(mut lhs: Tensor<u32>, mut rhs: Tensor<u32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/linalg.cairo | mod matmul;
mod transpose;
mod trilu;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/linalg/matmul.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::matmul docstring
fn matmul<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let self_shape = *self.shape;
let other_shape = *other.shape;
let self_ndim = (self_shape).len();
let other_ndim = (other_shape).len();
assert(self_ndim <= 2 && other_ndim <= 2, 'supports only 1D and 2D matmul');
//! Case: Both tensors are 1-dimensional
if self_ndim == 1 && other_ndim == 1 {
let dot = dot_product((*self).data, (*other).data);
let mut result_shape = ArrayTrait::new();
let mut result_data = ArrayTrait::new();
result_shape.append(1);
result_data.append(dot);
return TensorTrait::new(result_shape.span(), result_data.span());
}
let self_shape = prepare_shape_for_matmul(self_shape, true);
let other_shape = prepare_shape_for_matmul(other_shape, false);
let result = matrix_multiply(*self.data, self_shape, *other.data, other_shape);
let result_shape = adjust_output_shape_after_matmul(result.shape, self_ndim, other_ndim);
TensorTrait::new(result_shape, result.data)
}
/// Computes the dot product of two 1-dimensional tensors.
///
/// # Arguments
/// * `vec1` - A span containing the data elements of the first vector.
/// * `vec2` - A span containing the data elements of the second vector.
///
/// # Panics
/// * Panics if the lengths of the vectors do not match.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A value of type `T` representing the dot product of the two vectors.
fn dot_product<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut vec1: Span<T>, mut vec2: Span<T>
) -> T {
assert(vec1.len() == vec2.len(), 'vector lengths do not match');
let mut result: T = NumberTrait::zero();
loop {
match vec1.pop_front() {
Option::Some(vec1_item) => {
let element_product = *vec1_item * *vec2.pop_front().unwrap();
result += element_product;
},
Option::None => { break; }
};
};
result
}
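// Worked sketch (hypothetical values): `dot_product` walks both spans in
// lockstep and accumulates the element-wise products.
//
//     let vec1: Span<u32> = array![1, 2, 3].span();
//     let vec2: Span<u32> = array![4, 5, 6].span();
//     // dot_product(vec1, vec2) == 1*4 + 2*5 + 3*6 == 32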
/// Computes the matrix multiplication of two 2-dimensional tensors.
///
/// # Arguments
/// * `mat1` - A span containing the data elements of the first matrix.
/// * `mat1_shape` - A span containing the shape of the first matrix as usize elements.
/// * `mat2` - A span containing the data elements of the second matrix.
/// * `mat2_shape` - A span containing the shape of the second matrix as usize elements.
///
/// # Panics
/// * Panics if the inner dimensions of the matrices do not match.
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A `Tensor<T>` holding the resulting matrix.
fn matrix_multiply<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mat1: Span<T>, mat1_shape: Span<usize>, mat2: Span<T>, mat2_shape: Span<usize>
) -> Tensor<T> {
let m = *mat1_shape[0];
let n = *mat1_shape[1];
let p = *mat2_shape[1];
let mut result_data: Array<T> = array![];
let mut result_shape: Array<usize> = array![m, p];
let mut i = 0_usize;
while i != m {
let mut j = 0_usize;
while j != p {
let mut sum: T = NumberTrait::zero();
let mut k = 0_usize;
while k != n {
let mat1_index = i * n + k;
let mat2_index = k * p + j;
sum += *mat1[mat1_index] * *mat2[mat2_index];
k += 1;
};
result_data.append(sum);
j += 1;
};
i += 1;
};
TensorTrait::new(result_shape.span(), result_data.span())
}
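// Worked sketch of the row-major indexing above (hypothetical values): element
// (i, j) of the result is the sum over k of mat1[i * n + k] * mat2[k * p + j].
//
//     // mat1 = [[1, 2], [3, 4]], mat2 = [[5, 6], [7, 8]], both shape [2, 2]
//     let result = matrix_multiply(
//         array![1_u32, 2, 3, 4].span(), array![2, 2].span(),
//         array![5_u32, 6, 7, 8].span(), array![2, 2].span()
//     );
//     // result.data == [19, 22, 43, 50], i.e. [[19, 22], [43, 50]]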
/// Prepares the shape of a tensor for matrix multiplication.
///
/// # Arguments
/// * `shape` - A mutable span representing the shape of the tensor.
/// * `is_first_tensor` - A boolean indicating whether the input tensor is the first (left)
/// tensor in the matrix multiplication operation.
///
/// # Behavior
/// This function adjusts the shapes of the tensors based on their dimensionality:
/// * If the first tensor is 1-dimensional, a 1 is prepended to its shape.
/// * If the second tensor is 1-dimensional, a 1 is appended to its shape.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A span representing the adjusted shape of the tensor.
fn prepare_shape_for_matmul(mut shape: Span<usize>, is_first_tensor: bool) -> Span<usize> {
let ndim = shape.len();
if ndim == 1 && is_first_tensor {
// Prepend 1 to shape if it's 1-dimensional
let mut shape_adjusted = ArrayTrait::new();
shape_adjusted.append(1);
loop {
match shape.pop_front() {
Option::Some(item) => { shape_adjusted.append(*item); },
Option::None => { break; }
};
};
return shape_adjusted.span();
} else if ndim == 1 && !is_first_tensor {
// Append 1 to shape if it's 1-dimensional
let mut shape_adjusted = ArrayTrait::new();
loop {
match shape.pop_front() {
Option::Some(item) => { shape_adjusted.append(*item) },
Option::None => { break; }
};
};
shape_adjusted.append(1);
return shape_adjusted.span();
}
shape
}
/// Adjusts the output shape of the matrix multiplication result based on the
/// original dimensionality of the input tensors.
///
/// # Arguments
/// * `output_shape` - A mutable span representing the shape of the matrix multiplication result.
/// * `self_dim` - A usize representing the dimensionality of the first input tensor.
/// * `other_dim` - A usize representing the dimensionality of the second input tensor.
///
/// # Behavior
/// This function adjusts the output shape based on the dimensionality of the input tensors:
/// * If the first input tensor was 1-dimensional, the prepended 1 is removed from the output shape.
/// * If the second input tensor was 1-dimensional, the appended 1 is removed from the output shape.
///
/// # Returns
/// * A span representing the adjusted output shape of the matrix multiplication result.
fn adjust_output_shape_after_matmul(
mut output_shape: Span<usize>, self_dim: usize, other_dim: usize
) -> Span<usize> {
// If self_shape was 1-dimensional, remove the prepended 1 from the output_shape.
if self_dim == 1 {
let _ = output_shape.pop_front().unwrap();
}
// If other_shape was 1-dimensional, remove the appended 1 from the output_shape.
if other_dim == 1 {
let _ = output_shape.pop_back().unwrap();
}
output_shape
}
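// End-to-end shape sketch for the 1D case (hypothetical values): a [3] vector
// times a [3, 2] matrix is first promoted to [1, 3] x [3, 2] = [1, 2] by
// `prepare_shape_for_matmul`, then `adjust_output_shape_after_matmul` strips
// the prepended 1, so the final output shape is [2].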
| https://github.com/gizatechxyz/orion |
src/operators/tensor/linalg/transpose.cairo | use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape
};
use orion::operators::tensor::helpers::{len_from_shape, find_axis, permutation_output_shape};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::transpose docstring
fn transpose<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>, axes: Span<usize>
) -> Tensor<T> {
if (*self.shape).len() == 1 {
return self.identity();
}
assert(axes.len() == (*self.shape).len(), 'shape and axes length unequal');
if (*self.shape).len() == 2 {
return transpose2D(@(*self));
}
let output_shape = permutation_output_shape(*self.shape, axes);
let output_data_len = len_from_shape(output_shape);
let mut output_data: Array<T> = array![];
let mut output_index: usize = 0;
while output_index != output_data_len {
let output_indices = unravel_index(output_index, output_shape);
let mut input_indices: Array<u32> = array![];
let mut output_axis: usize = 0;
while output_axis != axes.len() {
let input_axis = find_axis(axes, output_axis);
input_indices.append(*output_indices[input_axis]);
output_axis += 1;
};
let input_index = ravel_index(*self.shape, input_indices.span());
output_data.append(*(*self.data)[input_index]);
output_index += 1;
};
TensorTrait::new(output_shape, output_data.span())
}
fn transpose2D<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>
) -> Tensor<T> {
assert((*self.shape).len() == 2, 'expected a 2D tensor');
let mut output_data: Array<T> = array![];
let n = *self.shape[0];
let m = *self.shape[1];
let mut output_shape: Array<u32> = array![m, n];
let mut j: usize = 0;
while j != m {
let mut i = 0;
while i != n {
output_data.append(*(*self.data)[i * m + j]);
i += 1;
};
j += 1;
};
TensorTrait::new(output_shape.span(), output_data.span())
}
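// Worked sketch (hypothetical values): transposing a [2, 3] tensor
// [[1, 2, 3], [4, 5, 6]] reads column j of the input for each output row, so
// the output is [[1, 4], [2, 5], [3, 6]] with shape [3, 2] and row-major data
// [1, 4, 2, 5, 3, 6].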
| https://github.com/gizatechxyz/orion |
src/operators/tensor/linalg/trilu.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::trilu docstring
fn trilu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, upper: bool, k: i64
) -> Tensor<T> {
assert((*self.shape).len() >= 2, 'must have at least 2 dimensions');
let shape_len = (*self.shape).len();
let mut output_data: Array<T> = array![];
let mut output_size: Array<u32> = array![];
let mut batch_size = 1;
let mut n: u32 = 0;
let mut m: u32 = 0;
let mut self_shape = *self.shape;
let mut i = 0;
loop {
match self_shape.pop_front() {
Option::Some(val) => {
if i == shape_len - 2 {
n = *val;
} else if i == shape_len - 1 {
m = *val;
} else {
batch_size *= *val;
}
i += 1;
output_size.append(*val);
},
Option::None => { break; }
}
};
let mut self_data = *self.data;
let mut b = 0;
loop {
if b == batch_size {
break ();
}
let mut i = 0;
loop {
if i == n {
break ();
}
let mut j = 0;
loop {
if j == m {
break ();
}
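// Upcast the unsigned loop counters to i64 by routing through felt252 so they
// can be compared against the signed diagonal offset `k` below.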
let ii: felt252 = i.into();
let jj: felt252 = j.into();
let iii: i64 = ii.try_into().unwrap();
let jjj: i64 = jj.try_into().unwrap();
let result = match self_data.pop_front() {
Option::Some(val) => {
if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) {
*val
} else {
NumberTrait::zero()
}
},
Option::None => { break; }
};
output_data.append(result);
j += 1;
};
i += 1;
};
b += 1;
};
TensorTrait::new(*self.shape, output_data.span())
}
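// Worked sketch (hypothetical values): for a [3, 3] input [[1, 2, 3], [4, 5, 6],
// [7, 8, 9]] with upper == true and k == 0, an element at (i, j) is kept when
// i + k <= j, so the result is [[1, 2, 3], [0, 5, 6], [0, 0, 9]]; with k == 1
// it would be [[0, 2, 3], [0, 0, 6], [0, 0, 0]].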
| https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation.cairo | mod unique;
mod split;
mod split_to_sequence;
mod reverse_sequence;
mod optional;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation/optional.cairo | use orion::operators::tensor::{Tensor, TensorTrait};
/// Cf: TensorTrait::optional docstring
fn optional<T, +Copy<T>, +Drop<T>, impl TOption: OptionTrait<T>>(
self: @Tensor<T>
) -> Option<Tensor<T>> {
Option::Some(*self)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation/reverse_sequence.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::reverse_sequence docstring
fn reverse_sequence<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<T> {
let shape = *self.shape;
let mut data: Array<T> = array![];
let has_batch_axis: usize = match batch_axis {
Option::Some(value) => {
assert!((value == 0) || (value == 1), "batch_axis must be one of 1 or 0.");
value
},
Option::None => 0,
};
let has_time_axis: usize = match time_axis {
Option::Some(value) => {
assert!((value == 0) || (value == 1), "time_axis must be one of 1 or 0.");
value
},
Option::None => 1,
};
assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal");
assert((*self.shape).len() >= 2, 'Tensor of rank r >= 2');
let control: bool = has_batch_axis == 0 && has_time_axis == 1;
let mut index: Array<usize> = reverse_index(*self.shape, sequence_lens, control);
loop {
match index.pop_front() {
Option::Some(ele) => { data.append(*((*self).data).at(ele)); },
Option::None => { break; }
}
};
TensorTrait::<T>::new(shape, data.span())
}
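// Worked sketch (hypothetical values, mirroring the ONNX ReverseSequence
// example): for a [4, 4] input with data 0..15, sequence_lens = [4, 3, 2, 1],
// batch_axis = 0 and time_axis = 1, the first sequence_lens[i] elements of
// each row i are reversed:
//
//     [[ 0,  1,  2,  3],        [[ 3,  2,  1,  0],
//      [ 4,  5,  6,  7],   ->    [ 6,  5,  4,  7],
//      [ 8,  9, 10, 11],         [ 9,  8, 10, 11],
//      [12, 13, 14, 15]]         [12, 13, 14, 15]]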
fn reverse_index(shape: Span<usize>, sequence_lens: Tensor<usize>, control: bool) -> Array<usize> {
let x: usize = *shape.at(0);
let y: usize = *shape.at(1);
let mut result: Array<usize> = array![];
if control {
// [i, slice]
assert!(
sequence_lens.data.len() <= x, "The length of sequence_lens cannot exceed the batch-axis length"
);
let mut i: usize = 0;
while i != x {
let reverse: usize = (*sequence_lens.data.at(i));
assert!(
reverse <= y && reverse >= 1,
"sequence_lens must be greater than one and less than batch_size"
);
let mut j: usize = reverse - 1;
loop {
if j == 0 {
result.append(i * y + j);
break;
}
result.append(i * y + j);
j -= 1;
};
let current_index_len: usize = (i + 1) * y - 1;
let mut j: usize = result.len();
while j != current_index_len + 1 {
result.append(j);
j += 1;
};
i += 1;
};
} else {
// [slice, i]
assert!(
sequence_lens.data.len() <= y, "The length of sequence_lens cannot exceed the batch-axis length"
);
let mut tmp = ArrayTrait::<usize>::new();
let mut i: usize = 0;
while i != y {
let reverse: usize = *sequence_lens.data.at(i);
assert!(
reverse <= x && reverse >= 1,
"sequence_lens must be greater than one and less than batch_size"
);
let mut j: usize = reverse - 1;
loop {
if j == 0 {
tmp.append(j * y + i);
break;
}
tmp.append(j * y + i);
j -= 1;
};
let mut j: usize = reverse;
while j != x {
tmp.append(j * y + i);
j += 1;
};
i += 1;
};
let tmp = tmp.span();
let mut i: usize = 0;
while i != x {
let mut j: usize = 0;
while j != y {
result.append((*tmp.at(j * x + i)));
j += 1;
};
i += 1;
};
}
result
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation/split.cairo | use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl};
/// Cf: TensorTrait::split docstring
fn split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, split: Option<Tensor<usize>>
) -> Array<Tensor<T>> {
let has_num_outputs = match num_outputs {
Option::Some => true,
Option::None => false,
};
let has_split = match split {
Option::Some => true,
Option::None => false,
};
assert(!(has_num_outputs && has_split), 'split or num_outputs not both.');
assert(has_num_outputs || has_split, 'split or num_outputs needed.');
let mut splited_t: Array<Tensor<T>> = array![];
let rank = (*self).shape.len();
// assert(axis < rank && axis > -rank, 'axis out of dimensions');
assert(axis < rank, 'axis out of dimensions');
if (has_num_outputs) {
splited_t = split_num_outputs(self, axis, num_outputs.unwrap());
} else {
splited_t = split_has_split(self, axis, split.unwrap());
}
splited_t
}
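// Usage sketch (hypothetical values): splitting a [2, 6] tensor along axis 1
// into two outputs yields two [2, 3] tensors.
//
//     let t = TensorTrait::<u32>::new(
//         array![2, 6].span(), array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11].span()
//     );
//     let parts = t.split(1, Option::Some(2), Option::None(()));
//     // parts[0].data == [0, 1, 2, 6, 7, 8], parts[1].data == [3, 4, 5, 9, 10, 11]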
/// Subfunction split for tensors (with num_outputs).
/// Cf: TensorTrait::split docstring
fn split_num_outputs<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, mut axis: usize, num_outputs: usize
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut div: usize = 0;
// construct split array
let mut split: Array<usize> = array![];
// if axis==0 {
// axis = 1;
// }
if (*(*t).shape.at(axis) % num_outputs == 0) {
div = *(*t).shape.at(axis) / num_outputs;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
} else {
div = *(*t).shape.at(axis) / num_outputs + 1;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
match split.pop_front() {
Option::Some(split_last_one) => {
split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1));
},
Option::None => { assert(false, 'split array is empty'); }
}
}
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t).shape.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split.len() {
let spl = *split.at(i);
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'Failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'Failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
}
/// Subfunction split for tensors (with split).
/// Cf: TensorTrait::split docstring
fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, axis: usize, split: Tensor<u32>
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t).shape.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split.data.len() {
let spl: usize = split.at(indices: array![i].span());
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'Failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'Failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation/split_to_sequence.cairo | use core::option::OptionTrait;
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl};
/// Cf: TensorTrait::split_to_sequence docstring
fn split_to_sequence<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<T>> {
let has_split = match split {
Option::Some => true,
Option::None => false,
};
let mut has_num_outputs = false;
let mut split_unwrap: Tensor<usize> = TensorTrait::new(array![1].span(), array![1].span());
if (!has_split) {
let split_length = *(*self.shape).at(axis);
let mut split_data: Array<usize> = array![];
let mut i = 0;
while i != split_length {
split_data.append(1);
i += 1;
};
split_unwrap = TensorTrait::new(array![split_length].span(), split_data.span());
} else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) {
// A scalar
has_num_outputs = true;
split_unwrap = split.unwrap();
} else {
split_unwrap = split.unwrap();
}
let mut splited_t: Array<Tensor<T>> = array![];
let rank = (*self).shape.len();
// assert(axis < rank && axis > -rank, 'axis out of dimensions');
assert(axis < rank, 'axis out of dimensions');
if (has_num_outputs) {
splited_t = split_num_outputs(self, axis, *(split_unwrap.data).at(0));
} else {
splited_t = split_has_split(self, axis, split_unwrap);
}
if (keepdims == 0 && !has_split) {
let mut splited_t_temp: Array<Tensor<T>> = array![];
let mut i = 0;
        while i != splited_t.len() {
            let mut shape: Array<i32> = array![];
            let mut j = 0;
            let shape_in_splited: Span<usize> = *splited_t.at(i).shape;
            while j != shape_in_splited.len() {
                if (j != axis) {
                    shape.append((*shape_in_splited.at(j)).try_into().unwrap())
                }
                j += 1;
            };
            splited_t_temp.append(splited_t[i].reshape(shape.span(), false));
            i += 1;
        };
return splited_t_temp;
}
splited_t
}
/// Subfunction split for tensors (with num_outputs).
/// Cf: TensorTrait::split docstring
fn split_num_outputs<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, mut axis: usize, num_outputs: usize
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut div: usize = 0;
    // construct split array
let mut split: Array<usize> = array![];
// if axis==0 {
// axis = 1;
// }
if (*(*t).shape.at(axis) % num_outputs == 0) {
div = *(*t).shape.at(axis) / num_outputs;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
} else {
div = *(*t).shape.at(axis) / num_outputs + 1;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
match split.pop_front() {
Option::Some(split_last_one) => {
split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1));
},
Option::None => { assert(false, 'split is none array'); }
}
}
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
    while i != (*t).shape.len() {
        let s: usize = *(*t).shape.at(i);
        sli.set(i, 0, 0);
        sli.set(i, 1, s);
        i += 1;
    };
let mut i: usize = 0;
    while i != split.len() {
        let spl = *split.at(i);
        sli.set(axis, 0, pos);
        pos += spl;
        sli.set(axis, 1, pos);
        let end_ele_0 = match sli.get(axis, 0) {
            Option::Some(res) => res,
            Option::None => {
                assert(false, 'Failed to get end_ele_0');
                0
            },
        };
        let end_ele_1 = match sli.get(axis, 1) {
            Option::Some(res) => res,
            Option::None => {
                assert(false, 'Failed to get end_ele_1');
                0
            },
        };
        let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
        let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
        let axes: Option<Span<usize>> = Option::None(());
        let steps: Option<Span<usize>> = Option::None(());
        let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
        splited_t.append(sub_t);
        i += 1;
    };
splited_t
}
/// Subfunction split for tensors (with split).
/// Cf: TensorTrait::split docstring
fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, axis: usize, split: Tensor<u32>
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
    while i != (*t).shape.len() {
        let s: usize = *(*t).shape.at(i);
        sli.set(i, 0, 0);
        sli.set(i, 1, s);
        i += 1;
    };
let mut i: usize = 0;
    while i != split.data.len() {
        let spl: usize = split.at(indices: array![i].span());
        sli.set(axis, 0, pos);
        pos += spl;
        sli.set(axis, 1, pos);
        let end_ele_0 = match sli.get(axis, 0) {
            Option::Some(res) => res,
            Option::None => {
                assert(false, 'Failed to get end_ele_0');
                0
            },
        };
        let end_ele_1 = match sli.get(axis, 1) {
            Option::Some(res) => res,
            Option::None => {
                assert(false, 'Failed to get end_ele_1');
                0
            },
        };
        let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
        let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
        let axes: Option<Span<usize>> = Option::None(());
        let steps: Option<Span<usize>> = Option::None(());
        let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
        splited_t.append(sub_t);
        i += 1;
    };
splited_t
} | https://github.com/gizatechxyz/orion |
src/operators/tensor/manipulation/unique.cairo | use alexandria_data_structures::array_ext::{SpanTraitExt, ArrayTraitExt};
use alexandria_sorting::merge_sort::merge;
use orion::numbers::{NumberTrait, U32IntoI32};
use orion::operators::tensor::core::{Tensor, TensorTrait, stride};
use orion::operators::tensor::helpers::{as_tensors_array, flatten_array_of_tensors};
/// Cf: TensorTrait::unique docstring
fn unique<
T,
+Copy<T>,
+Drop<T>,
+TensorTrait<T>,
+PartialOrd<T>,
+PartialEq<T>,
+PartialEq<Tensor<T>>,
+PartialOrd<Tensor<T>>
>(
self: @Tensor<T>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<T>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
let sorted = match sorted {
Option::Some(sorted) => sorted,
Option::None => true,
};
let (unique_elements, new_shape, indices, inverse_indices, count) = if axis.is_none() {
unique_flatten(self, sorted)
} else {
unique_along_axis(self, axis.unwrap(), sorted)
};
let unique_elements = Tensor::<T> { shape: new_shape, data: unique_elements };
let indices = Tensor::<i32> { shape: array![indices.len()].span(), data: indices };
let inverse_indices = Tensor::<
i32
> { shape: array![inverse_indices.len()].span(), data: inverse_indices };
let count = Tensor::<i32> { shape: array![count.len()].span(), data: count };
(unique_elements, indices, inverse_indices, count)
}
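// Editor's note: an illustrative sketch, not upstream source; it assumes the
// u32 tensor comparison impls (PartialEq/PartialOrd over Tensor<u32>) are in
// scope. With sorted = true, ONNX Unique semantics give uniques [1, 2, 3, 4],
// inverse indices [1, 0, 0, 2, 3, 2] and counts [1, 1, 2, 1] for this input.
fn _unique_usage_sketch() {
    let t = TensorTrait::<u32>::new(array![6].span(), array![2, 1, 1, 3, 4, 3].span());
    let (uniques, _indices, _inverse_indices, _counts) = unique(
        @t, Option::None, Option::Some(true)
    );
    assert(uniques.data.len() == 4, 'expected 4 unique values');
}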
/// Subfunction unique for flattened tensors (no axis).
/// Cf: TensorTrait::unique docstring
fn unique_flatten<T, +Copy<T>, +Drop<T>, +PartialOrd<T>, +PartialEq<T>,>(
t: @Tensor<T>, sorted: bool
) -> (Span<T>, Span<usize>, Span<i32>, Span<i32>, Span<i32>) {
let mut indices: Array<i32> = array![];
let mut inverse_indices: Array<i32> = array![];
let mut count: Array<i32> = array![];
let mut unique_elements = (*t.data).unique();
let mut new_shape: Array<usize> = array![unique_elements.len()];
if (sorted) {
unique_elements = merge(unique_elements);
}
let mut unique_elements_span = unique_elements.span();
let mut data_cpy = *(t.data);
loop {
match unique_elements_span.pop_front() {
Option::Some(value) => {
let occurences = data_cpy.occurrences_of(*value);
count.append(occurences.into());
let idx_in_data = data_cpy.index_of(*value).unwrap();
indices.append(idx_in_data.into());
},
Option::None => { break; }
}
};
unique_elements_span = unique_elements.span();
loop {
match data_cpy.pop_front() {
Option::Some(value) => {
let idx_in_uniques = unique_elements_span.index_of(*value).unwrap();
inverse_indices.append(idx_in_uniques.into());
},
Option::None => { break; }
}
};
(unique_elements.span(), new_shape.span(), indices.span(), inverse_indices.span(), count.span())
}
/// Subfunction unique for tensors (with axis).
/// Cf: TensorTrait::unique docstring
fn unique_along_axis<
T,
+Copy<T>,
+Drop<T>,
+PartialOrd<T>,
+PartialEq<T>,
+TensorTrait<T>,
+PartialEq<Tensor<T>>,
+PartialOrd<Tensor<T>>
>(
t: @Tensor<T>, axis: usize, sorted: bool
) -> (Span<T>, Span<usize>, Span<i32>, Span<i32>, Span<i32>) {
let mut new_shape: Array<usize> = array![];
let mut indices: Array<i32> = array![];
let mut inverse_indices: Array<i32> = array![];
let mut count: Array<i32> = array![];
let rank = (*t).shape.len();
assert(axis < rank, 'axis out of dimensions');
let all_tensors = as_tensors_array(t, axis);
let mut unique_tensors = all_tensors.unique();
let mut unique_tensors_len = unique_tensors.len();
let mut i = 0;
while i != rank {
new_shape.append(if axis == i {
unique_tensors_len
} else {
*(*t).shape.at(i)
});
i += 1;
};
if (sorted) {
unique_tensors = merge(unique_tensors);
}
let mut all_tensors_span = all_tensors.span();
let mut unique_tensors_span = unique_tensors.span();
loop {
match unique_tensors_span.pop_front() {
Option::Some(t) => {
let occurences = all_tensors_span.occurrences_of(*t);
count.append(occurences.into());
let idx_in_all = all_tensors_span.index_of(*t).unwrap();
indices.append(idx_in_all.into());
},
Option::None => { break; }
}
};
unique_tensors_span = unique_tensors.span();
loop {
match all_tensors_span.pop_front() {
Option::Some(t) => {
let idx_in_uniques = unique_tensors_span.index_of(*t).unwrap();
inverse_indices.append(idx_in_uniques.into());
},
Option::None => { break; }
}
};
let new_shape_span = new_shape.span();
let unique_elements = flatten_array_of_tensors(unique_tensors, axis, new_shape_span);
(unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math.cairo | mod min_in_tensor;
mod min;
mod max_in_tensor;
mod max;
mod reduce_sum;
mod reduce_prod;
mod argmax;
mod argmin;
mod exp;
mod log;
mod arithmetic;
mod equal;
mod greater;
mod greater_equal;
mod less;
mod less_equal;
mod abs;
mod ceil;
mod sin;
mod cos;
mod asin;
mod cumsum;
mod flatten;
mod sinh;
mod tanh;
mod cosh;
mod acosh;
mod asinh;
mod atan;
mod xor;
mod or;
mod acos;
mod onehot;
mod sqrt;
mod concat;
mod gather;
mod sign;
mod and;
mod neg;
mod where;
mod not;
mod round;
mod scatter;
mod binarizer;
mod reduce_l2;
mod reduce_l1;
mod reduce_sum_square;
mod bitwise_and;
mod bitwise_xor;
mod bitwise_or;
mod gather_elements;
mod reduce_min;
mod shrink;
mod reduce_mean;
mod pow;
mod is_nan;
mod is_inf;
mod gather_nd;
mod reduce_log_sum;
mod erf;
mod reduce_log_sum_exp;
mod layer_normalization;
mod resize;
mod compress;
mod random_uniform_like;
mod range;
mod hann_window;
mod hamming_window;
mod blackman_window;
mod scatter_nd;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/abs.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::abs docstring
fn abs<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => { data_result.append((*item).abs()); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(z.shape, data_result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/acos.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::acos docstring
fn acos<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).acos()); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/acosh.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::acosh docstring
fn acosh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).acosh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/and.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::and docstring
fn and(y: @Tensor<bool>, z: @Tensor<bool>) -> Tensor<bool> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<bool> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]);
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/argmax.cairo | use core::option::OptionTrait;
use core::traits::TryInto;
use orion::operators::tensor::{core::{Tensor, TensorTrait, ravel_index, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::argmax docstring
fn argmax<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
let keepdims = keepdims.unwrap_or(true);
let select_last_index = select_last_index.unwrap_or(false);
// Convert negative axis to positive
let axis = if axis < 0 {
((*self.shape).len().try_into().unwrap() + axis).try_into().unwrap()
} else {
axis.try_into().unwrap()
};
    assert(axis < (*self.shape).len(), 'axis out of dimensions');
if (*self.shape).len() == 1 {
return find_argmax_1D::<T>(*self, axis, true, select_last_index);
}
let mut output_data: Array<i32> = array![];
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let MIN = NumberTrait::min_value();
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let current_argmax = find_argmax(self, output_indices, axis, 0, MIN, 0, select_last_index);
output_data.append(current_argmax);
index += 1;
};
TensorTrait::<i32>::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span())
}
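// Editor's note: a usage sketch, not upstream source. For a 2x3 tensor
// [[1, 9, 3], [7, 2, 5]], argmax over axis 1 with keepdims = false returns
// the length-2 tensor [1, 0].
fn _argmax_usage_sketch() {
    let t = TensorTrait::<u32>::new(array![2, 3].span(), array![1, 9, 3, 7, 2, 5].span());
    let result = argmax(@t, 1, Option::Some(false), Option::None);
    assert(*result.data.at(0) == 1, 'row 0 max at index 1');
}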
/// Helper function that finds the index of the maximum value in a flat tensor.
///
/// # Arguments
/// * `input` - The input tensor.
/// * `axis` - The axis along which to find the maximum value.
/// * `keepdims` - Whether to keep the reduced dimension or not.
/// * `select_last_index` - Whether to select last occurrence of the max value along the axis.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Tensor<i32> containing the index of the maximum value along the specified axis.
fn find_argmax_1D<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut input: Tensor<T>, axis: usize, keepdims: bool, select_last_index: bool
) -> Tensor<i32> {
let mut output_data = ArrayTrait::<i32>::new();
let mut max = match input.data.pop_front() {
Option::Some(item) => *item,
Option::None => {
return TensorTrait::<
i32
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
};
let mut max_index = 0;
let mut count = 0;
loop {
match input.data.pop_front() {
Option::Some(item) => {
count += 1;
if *item > max {
max = *item;
max_index = count;
} else {
if select_last_index && item == @max {
max_index = count;
}
};
},
Option::None => { break; }
};
};
output_data.append(max_index);
return TensorTrait::<
i32
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
/// Recursive helper function that finds the index of the maximum value along a specific axis.
///
/// # Arguments
/// * `input` - The input tensor.
/// * `output_indices` - A span of output indices.
/// * `axis` - The axis along which to find the maximum value.
/// * `axis_index` - The current index along the specified axis.
/// * `max_value` - The current maximum value found along the axis.
/// * `argmax` - The current index of the maximum value along the axis.
/// * `select_last_index` - Whether to select last occurrence of the max value along the axis.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * An i32 value representing the index of the maximum value along the specified axis.
fn find_argmax<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
input: @Tensor<T>,
output_indices: Span<usize>,
axis: usize,
axis_index: usize,
max_value: T,
argmax: usize,
select_last_index: bool
) -> i32 {
if axis_index == *(*input.shape)[axis] {
return argmax.try_into().unwrap();
}
let input_indices = combine_indices(output_indices, axis_index, axis);
let input_index = ravel_index(*input.shape, input_indices);
let ele = *(*input.data)[input_index];
let (new_max_value, new_argmax) = if ele > max_value {
(ele, axis_index)
} else {
if select_last_index && ele == max_value {
(ele, axis_index)
} else {
(max_value, argmax)
}
};
return find_argmax(
input,
output_indices,
axis,
axis_index + 1_usize,
new_max_value,
new_argmax,
select_last_index
);
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/argmin.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, combine_indices, len_from_shape};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::argmin docstring
fn argmin<
T,
MAG,
impl UsizeTensor: TensorTrait<usize>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
let keepdims = match keepdims {
Option::Some(val) => val,
Option::None => true,
};
let select_last_index = match select_last_index {
Option::Some(val) => val,
Option::None => false,
};
    assert(axis < (*self.shape).len(), 'axis out of dimensions');
if (*self.shape).len() == 1 {
return find_argmin_1D(*self, axis, true, select_last_index);
}
let mut output_data: Array<u32> = array![];
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let MAX = NumberTrait::max_value();
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let current_argmin = find_argmin(self, output_indices, axis, 0, MAX, 0, select_last_index);
output_data.append(current_argmin);
index += 1;
};
TensorTrait::<usize>::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span())
}
/// Helper function that finds the index of the minimum value in a flat tensor.
///
/// # Arguments
/// * `input` - The input tensor.
/// * `axis` - The axis along which to find the minimum value.
/// * `keepdims` - Whether to keep the reduced dimension or not.
/// * `select_last_index` - Whether to select last occurrence of the min value along the axis.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A Tensor<usize> containing the index of the minimum value along the specified axis.
fn find_argmin_1D<
T,
impl UsizeTensor: TensorTrait<usize>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut input: Tensor<T>, axis: usize, keepdims: bool, select_last_index: bool
) -> Tensor<usize> {
let mut output_data = ArrayTrait::<usize>::new();
let mut min = match input.data.pop_front() {
Option::Some(item) => *item,
Option::None => {
return TensorTrait::<
usize
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
};
let mut min_index = 0;
let mut count = 0;
loop {
match input.data.pop_front() {
Option::Some(item) => {
count += 1;
if *item < min {
min = *item;
min_index = count;
} else {
if select_last_index && item == @min {
min_index = count;
}
};
},
Option::None => { break; }
};
};
output_data.append(min_index);
return TensorTrait::<
usize
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
/// Recursive helper function that finds the index of the minimum value along a specific axis.
///
/// # Arguments
/// * `input` - The input tensor.
/// * `output_indices` - A span of output indices.
/// * `axis` - The axis along which to find the minimum value.
/// * `axis_index` - The current index along the specified axis.
/// * `min_value` - The current minimum value found along the axis.
/// * `argmin` - The current index of the minimum value along the axis.
/// * `select_last_index` - Whether to select last occurrence of the min value along the axis.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A usize value representing the index of the minimum value along the specified axis.
fn find_argmin<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
input: @Tensor<T>,
output_indices: Span<usize>,
axis: usize,
axis_index: usize,
min_value: T,
argmin: usize,
select_last_index: bool
) -> usize {
if axis_index == *(*input.shape)[axis] {
return argmin;
}
let input_indices = combine_indices(output_indices, axis_index, axis);
let input_index = ravel_index(*input.shape, input_indices);
let ele = *(*input.data)[input_index];
let (new_min_value, new_argmin) = if ele < min_value {
(ele, axis_index)
} else {
if select_last_index && ele == min_value {
(ele, axis_index)
} else {
(min_value, argmin)
}
};
return find_argmin(
input,
output_indices,
axis,
axis_index + 1_usize,
new_min_value,
new_argmin,
select_last_index
);
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/arithmetic.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index,};
use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape,};
use orion::utils::saturate;
fn add<
T, impl TTensor: TensorTrait<T>, impl TAdd: Add<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] + *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
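// Editor's note: a broadcasting sketch, not upstream source. Adding a [2, 2]
// tensor and a [1, 2] tensor repeats the second operand across rows, per the
// broadcast helpers used above.
fn _add_broadcast_sketch() {
    let a = TensorTrait::<u32>::new(array![2, 2].span(), array![1, 2, 3, 4].span());
    let b = TensorTrait::<u32>::new(array![1, 2].span(), array![10, 20].span());
    // Result data is [11, 22, 13, 24].
    let c = add(@a, @b);
    assert(*c.data.at(3) == 24, 'broadcast add failed');
}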
fn add_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TAdd: Add<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::zero() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele + val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_add<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TAdd: Add<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] + *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn sub<
T, impl TTensor: TensorTrait<T>, impl TSub: Sub<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] - *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn sub_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TSub: Sub<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::zero() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele - val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_sub<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TSub: Sub<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] - *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn mul<
T, impl TTensor: TensorTrait<T>, impl TMul: Mul<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] * *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn mul_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TMul: Mul<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::one() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele * val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_mul<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TMul: Mul<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] * *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn div<
    T, impl TTensor: TensorTrait<T>, impl TDiv: Div<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] / *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn div_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TDiv: Div<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::one() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele / val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_div<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TDiv: Div<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] / *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn div_downcast<
T,
D,
impl TTensor: TensorTrait<T>,
impl DTensor: TensorTrait<D>,
impl DDiv: Div<D>,
impl TTryIntoD: TryInto<T, D>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl DCopy: Copy<D>,
impl DDrop: Drop<D>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<D> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
(*(*self.data)[indices_self]).try_into().unwrap()
/ (*(*other.data)[indices_other]).try_into().unwrap()
);
n += 1;
};
TensorTrait::<D>::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/asin.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::asin docstring
fn asin<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).asin()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/asinh.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::asinh docstring
fn asinh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).asinh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/atan.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
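/// Cf: TensorTrait::atan docstring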
fn atan<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).atan()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/binarizer.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::binarizer docstring
fn binarizer<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut self: Tensor<T>, threshold: Option<T>
) -> Tensor<T> {
let threshold: T = if threshold.is_some() {
threshold.unwrap()
} else {
NumberTrait::zero()
};
let mut binarized_data: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => {
if (*item) > threshold {
binarized_data.append(NumberTrait::one());
} else {
binarized_data.append(NumberTrait::zero());
}
},
Option::None => { break; }
};
};
TensorTrait::new(self.shape, binarized_data.span())
}
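// Editor's note: a usage sketch, not upstream source. With no threshold the
// default is zero, so strictly positive entries map to one.
fn _binarizer_usage_sketch() {
    let t = TensorTrait::<u32>::new(array![3].span(), array![0, 1, 5].span());
    // Expected output data: [0, 1, 1].
    let result = binarizer(t, Option::None);
    assert(*result.data.at(0) == 0, 'zero is not above threshold');
}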
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/bitwise_and.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_and docstring
fn bitwise_and<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_and(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/bitwise_or.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_or docstring
fn bitwise_or<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_or(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/bitwise_xor.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_xor docstring
fn bitwise_xor<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_xor(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/blackman_window.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
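/// Cf: TensorTrait::blackman_window docstring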
fn blackman_window<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAdd: Add<T>,
impl TSub: Sub<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
let start: T = NumberTrait::zero();
let one_step: T = NumberTrait::one();
let two: T = one_step + one_step;
let three: T = two + one_step;
let n25: T = three.pow(three) - two;
let alpha: T = (n25 - two * two) / (n25 * two);
let beta: T = two / n25;
let n_0_5: T = (one_step - two) / two;
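    // Editor's note: the constants above reduce to the classic Blackman
    // coefficients: n25 = 3^3 - 2 = 25, alpha = 21/50 = 0.42,
    // beta = 2/25 = 0.08 and n_0_5 = -0.5, so the window is
    // w[n] = 0.42 - 0.5 * cos(2*pi*n / N) + 0.08 * cos(4*pi*n / N).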
let ni = TensorTrait::range(start, size, one_step);
assert((ni.shape).len() == 1, 'Unexpected shape 1.');
let mut N_1 = size;
if periodic != Option::Some(1) {
N_1 = N_1 - one_step;
};
let len = *(ni.shape).at(0);
let mut arr1: Array<T> = array![];
let mut i: usize = 0;
while i != len {
let v = *(ni.data).at(i);
let r = (v * (PI * two)) / N_1;
arr1.append(r);
i += 1;
};
let window_cos = TensorTrait::<T>::new(ni.shape, arr1.span()).cos();
i = 0;
let mut a1: Array<T> = array![];
while i != len {
let v = *(window_cos.data).at(i);
let r = v * n_0_5;
a1.append(r);
i += 1;
};
let window1 = TensorTrait::<T>::new(ni.shape, a1.span());
let mut arr2: Array<T> = array![];
i = 0;
while i != len {
let v = *(ni.data).at(i);
let r = v * (PI * two * two) / N_1;
arr2.append(r);
i += 1;
};
let window_cos_2 = TensorTrait::<T>::new(ni.shape, arr2.span()).cos();
let mut a2: Array<T> = array![];
i = 0;
while i != len {
let v = *(window_cos_2.data).at(i);
let r = v * beta + alpha;
a2.append(r);
i += 1;
};
let window2 = TensorTrait::<T>::new(ni.shape, a2.span());
let mut arr: Array<T> = array![];
i = 0;
while i != len {
let v1 = *(window1.data).at(i);
let v2 = *(window2.data).at(i);
let r = v1 + v2;
arr.append(r);
i += 1;
};
TensorTrait::<T>::new(ni.shape, arr.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/ceil.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::ceil docstring
fn ceil<
T,
MAG,
impl FFixedTrait: FixedTrait<T, MAG>,
impl FTensor: TensorTrait<T>,
impl FCopy: Copy<T>,
impl FDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => { data_result.append((*item).ceil()); },
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/compress.cairo | use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::U32TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// Cf: TensorTrait::compress docstring
fn compress<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<T> {
let axis = match axis {
Option::Some(val) => val,
Option::None => 999
};
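    // Editor's note: 999 is an in-band sentinel meaning "no axis was given";
    // the input is then treated as flattened, per ONNX Compress semantics.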
let data_rank = (*self.shape).len();
let condition_rank = (condition.shape).len();
    assert((data_rank >= 1), 'data rank must be >= 1');
assert((condition_rank == 1), 'condition rank must be 1');
let mut data_shape = *self.shape;
if (axis != 999) {
assert(*data_shape.at(axis) >= condition.data.len(), 'index out of bound');
}
let mut output_shape = array![];
let mut index_data = array![];
let mut output_data = array![];
let mut condition_data = condition.data;
let mut ind = 0;
let mut condition_data_clone = condition_data.clone();
let mut output = 0;
loop {
match condition_data_clone.pop_front() {
Option::Some(val) => {
if (*val != 0) {
output += 1;
}
ind += 1;
},
Option::None => { break; }
};
};
if (axis == 999) {
output_shape.append(output);
let mut total_shape = 1;
loop {
match data_shape.pop_front() {
Option::Some(val) => { total_shape *= *val; },
Option::None => { break; }
};
};
let mut ind = 0;
loop {
match condition_data.pop_front() {
Option::Some(val) => {
if (ind == total_shape) {
break;
}
if (*val != 0) {
output_data.append(*self.data[ind]);
}
ind += 1;
},
Option::None => { break; }
};
};
} else {
let mut ind = 0;
let mut loop_breaker = 1;
let mut other_loop_breaker = 1;
let mut multiplier = 1;
let mut data_shape_clone = data_shape.clone();
loop {
match data_shape_clone.pop_front() {
Option::Some(val) => {
if (ind == axis) {
output_shape.append(output);
} else {
output_shape.append(*val);
if (ind > axis) {
loop_breaker *= *val;
}
if (ind >= axis) {
multiplier *= *val;
}
if (ind < axis) {
other_loop_breaker *= *val;
}
}
ind += 1;
},
Option::None => { break; }
};
};
let mut ind = 0;
let mut inner_index: usize = 0;
loop {
if (ind == other_loop_breaker) {
break;
}
let mut condition_data_clone = condition_data.clone();
inner_index = *data_shape.at(axis) * ind;
loop {
match condition_data_clone.pop_front() {
Option::Some(val) => {
if (*val != 0) {
let result = inner_index * loop_breaker;
let mut data_ind: usize = result;
loop {
if data_ind == result + loop_breaker {
break;
}
index_data.append(data_ind);
data_ind += 1;
};
}
inner_index += 1;
},
Option::None => { break; }
};
};
ind += 1;
};
loop {
match index_data.pop_front() {
Option::Some(val) => { output_data.append(*self.data[val]); },
Option::None => { break; }
};
};
}
let mut output_tensor = TensorTrait::<T>::new(output_shape.span(), output_data.span());
output_tensor
}
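// Editor's note: a usage sketch, not upstream source. A condition of [0, 1, 1]
// along axis 0 keeps rows 1 and 2 of a 3x2 tensor.
fn _compress_usage_sketch() {
    let t = TensorTrait::<u32>::new(array![3, 2].span(), array![1, 2, 3, 4, 5, 6].span());
    let cond = TensorTrait::<u32>::new(array![3].span(), array![0, 1, 1].span());
    // Expected output: shape [2, 2] with data [3, 4, 5, 6].
    let result = compress(@t, cond, Option::Some(0));
    assert(*result.data.at(0) == 3, 'row 0 should be dropped');
}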
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/concat.cairo | use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::{TensorTrait, Tensor};
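/// Cf: TensorTrait::concat docstring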
fn concat<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
mut tensors: Span<Tensor<T>>, axis: usize
) -> Tensor<T> {
assert(tensors.len() >= 2, 'Input tensors must be > 1');
let base_tensor = *tensors.at(0);
let base_shape = base_tensor.shape;
let dimension = base_shape.len();
assert(dimension > axis, 'Out of bounds for dimension');
// Validate shapes of tensors
validate_shapes(tensors, base_shape, axis);
// Calculate output size
let output_size = compute_output_size(base_shape, tensors, axis);
// Concatenate tensor data
let output_data: Array<T> = concatenate_data(tensors, axis, base_shape);
TensorTrait::<T>::new(output_size.span(), output_data.span())
}
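// Editor's note: a usage sketch, not upstream source. Concatenating two 1x2
// tensors along axis 0 yields a 2x2 tensor with the rows stacked in order.
fn _concat_usage_sketch() {
    let a = TensorTrait::<u32>::new(array![1, 2].span(), array![1, 2].span());
    let b = TensorTrait::<u32>::new(array![1, 2].span(), array![3, 4].span());
    // Expected output: shape [2, 2] with data [1, 2, 3, 4].
    let result = concat(array![a, b].span(), 0);
    assert(*result.shape.at(0) == 2, 'axis 0 should double');
}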
fn validate_shapes<T>(mut tensors: Span<Tensor<T>>, mut base_shape: Span<usize>, axis: usize) {
loop {
match tensors.pop_front() {
Option::Some(tensor) => {
assert(base_shape.len() == (*tensor.shape).len(), 'Dimension not the same');
let mut axis_index = 0;
let mut tensor_shape = *tensor.shape;
let mut base_shape_copy = base_shape;
loop {
match tensor_shape.pop_front() {
Option::Some(tensor_shape_i) => {
let base_shape_i = base_shape_copy.pop_front().unwrap();
if axis_index != axis {
assert(base_shape_i == tensor_shape_i, 'Shape is not the same');
}
axis_index += 1;
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
fn compute_output_size<T>(
mut base_shape: Span<usize>, mut tensors: Span<Tensor<T>>, axis: usize
) -> Array<u32> {
let mut output_size: Array<usize> = array![];
let mut axis_size = 0;
loop {
match tensors.pop_front() {
Option::Some(tensor) => { axis_size += *(*tensor.shape).at(axis); },
Option::None => { break; }
};
};
let mut shape_index = 0;
loop {
match base_shape.pop_front() {
Option::Some(item) => {
if shape_index == axis {
output_size.append(axis_size);
} else {
output_size.append(*item);
}
shape_index += 1;
},
Option::None => { break; }
};
};
output_size
}
fn concatenate_data<T, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
mut tensors: Span<Tensor<T>>, axis: usize, base_shape: Span<usize>
) -> Array<T> {
let mut output_data: Array<T> = array![];
let total_loops = product_upto(base_shape, axis);
let mut outer_loop_index = 0;
while outer_loop_index != total_loops {
let mut tensors_copy = tensors;
loop {
match tensors_copy.pop_front() {
Option::Some(tensor) => {
let slice_len = (*tensor.data).len() / total_loops;
let mut inner_index = 0;
while inner_index != slice_len {
output_data
.append(*(*tensor.data).at(slice_len * outer_loop_index + inner_index));
inner_index += 1;
};
},
Option::None => { break; }
};
};
outer_loop_index += 1;
};
output_data
}
fn product_upto(mut shape: Span<usize>, upto: usize) -> usize {
let mut total = 1;
let mut index = 0;
loop {
match shape.pop_front() {
Option::Some(val) => {
if index == upto {
break;
}
total *= *val;
index += 1;
},
Option::None => { break; }
};
};
total
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/cos.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::cos docstring
fn cos<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).cos()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/cosh.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::cosh docstring
fn cosh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).cosh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/cumsum.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::cumsum docstring
fn cumsum<
T,
MAG,
impl TTensorTrait: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAddEq: AddEq<T>,
impl TAdd: Add<T>,
impl TSub: Sub<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<T> {
let reverse = match reverse {
Option::Some(val) => val,
Option::None => false
};
if reverse {
cumsum_reverse::<T>(self, axis, exclusive, NumberTrait::zero())
} else {
cumsum_forward::<T>(self, axis, exclusive, NumberTrait::zero())
}
}
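// Editor's note: a usage sketch, not upstream source. The default is the
// forward, inclusive scan: cumsum of [1, 2, 3] along axis 0 is [1, 3, 6].
fn _cumsum_usage_sketch() {
    let t = TensorTrait::<u32>::new(array![3].span(), array![1, 2, 3].span());
    let result = cumsum(@t, 0, Option::None, Option::None);
    assert(*result.data.at(2) == 6, 'inclusive cumsum tail');
}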
/// Cf: TensorTrait::cumsum docstring
fn cumsum_forward<
T,
impl TTensorTrait: TensorTrait<T>,
impl TAdd: Add<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: usize, exclusive: Option<bool>, zero: T,
) -> Tensor<T> {
let exclusive = match exclusive {
Option::Some(val) => val,
Option::None => false,
};
assert(axis < (*self.shape).len(), 'axis out of dimensions');
let data = *self.data;
let mut output_data = array![];
let mut index: usize = 0;
while index != data.len() {
let current_indices = unravel_index(index, *self.shape);
let axis_value = *current_indices[axis];
if axis_value == 0 {
if exclusive {
output_data.append(zero);
} else {
output_data.append(*(data)[index]);
}
} else {
let previous_axis_element_indices = replace_index(
current_indices, axis, axis_value - 1
);
let previous_axis_element_index = ravel_index(
*self.shape, previous_axis_element_indices
);
if exclusive {
output_data
.append(
*output_data[previous_axis_element_index]
+ *(data)[previous_axis_element_index]
);
} else {
output_data.append(*output_data[previous_axis_element_index] + *(data)[index]);
};
}
index += 1;
};
TensorTrait::<T>::new(*self.shape, output_data.span())
}
/// Cf: TensorTrait::cumsum docstring
fn cumsum_reverse<
T,
impl TTensorTrait: TensorTrait<T>,
impl TAddEq: AddEq<T>,
impl TSub: Sub<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: usize, exclusive: Option<bool>, zero: T,
) -> Tensor<T> {
let exclusive = match exclusive {
Option::Some(val) => val,
Option::None => false,
};
assert(axis < (*self.shape).len(), 'axis out of dimensions');
let data = *self.data;
let mut output_data = array![];
let mut index: usize = 0;
while index != data.len() {
let current_indices = unravel_index(index, *self.shape);
let mut axis_value = *current_indices[axis];
if axis_value == 0 {
// If the axis value is 0, we need to sum all the elements
// in the axis.
let mut sum = *(data)[index];
if exclusive {
sum = zero;
}
let end_index = *(*self.shape)[axis] - 1;
loop {
axis_value += 1;
if axis_value > end_index {
break ();
}
let next_axis_element_indices = replace_index(current_indices, axis, axis_value);
let next_axis_element_index = ravel_index(*self.shape, next_axis_element_indices);
sum += *data[next_axis_element_index];
};
output_data.append(sum);
} else {
// If the axis value is not 0, we only need to do a subtraction
let previous_axis_element_indices = replace_index(
current_indices, axis, axis_value - 1
);
let previous_axis_element_index = ravel_index(
*self.shape, previous_axis_element_indices
);
if exclusive {
output_data.append(*output_data[previous_axis_element_index] - *(data)[index]);
} else {
output_data
.append(
*output_data[previous_axis_element_index]
- *(data)[previous_axis_element_index]
);
}
}
index += 1;
};
TensorTrait::<T>::new(*self.shape, output_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/equal.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::equal docstring
fn equal<
T,
impl UsizeFTensor: TensorTrait<usize>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<usize> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
if *(*y.data)[indices_self] == *(*z.data)[indices_other] {
result.append(1);
} else {
result.append(0);
}
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
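// Editor's note: a usage sketch, not upstream source. `equal` broadcasts like
// the arithmetic ops and encodes the result as 0/1 usize values.
fn _equal_usage_sketch() {
    let a = TensorTrait::<u32>::new(array![3].span(), array![1, 2, 3].span());
    let b = TensorTrait::<u32>::new(array![3].span(), array![1, 0, 3].span());
    // Expected output data: [1, 0, 1].
    let result = equal(@a, @b);
    assert(*result.data.at(1) == 0, 'middle entries differ');
}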
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/erf.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::erf docstring
fn erf<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TFixed: FixedTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => { data_result.append((*item).erf()); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(z.shape, data_result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/exp.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::exp docstring
fn exp<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).exp()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
/// Cf: TensorTrait::exp docstring
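/// Like `exp`, but evaluates each exponential in a wider fixed-point type W
/// (via `TIntoW`) so intermediate results keep more precision before any
/// downstream reduction and downcast.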
fn exp_upcast<
T,
TMAG,
W,
WMAG,
impl TFixedTrait: FixedTrait<T, TMAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WFixedTrait: FixedTrait<W, WMAG>,
impl WTensor: TensorTrait<W>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TIntoW: Into<T, W>,
>(
mut self: Tensor<T>
) -> Tensor<W> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((TIntoW::into(*item)).exp()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/flatten.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::flatten docstring
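/// Collapses the dims before `axis` into the first output dim and the rest into
/// the second, e.g. shape [2, 3, 4] with axis = 1 becomes [2, 12].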
fn flatten<T, impl TTensorTrait: TensorTrait<T>>(self: @Tensor<T>, axis: usize) -> Tensor<T> {
let mut shape = *self.shape;
assert(axis < shape.len(), 'axis out of dimensions');
let mut new_shape_first_axis = 1;
let mut index = 0;
loop {
match shape.pop_front() {
Option::Some(val) => {
if index == axis {
break;
}
new_shape_first_axis *= *val;
index += 1;
},
Option::None => { break; }
};
};
let new_shape_second_axis = (*self.data).len() / new_shape_first_axis;
self
.reshape(
array![
new_shape_first_axis.try_into().unwrap(), new_shape_second_axis.try_into().unwrap()
]
.span(),
false
)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/gather.cairo | use core::option::OptionTrait;
use core::traits::TryInto;
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::gather docstring
fn gather<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<T> {
let axis: usize = match axis {
Option::Some(val) => {
if val < 0 {
(((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap()
} else {
val.try_into().unwrap()
}
},
Option::None => 0
};
assert(axis < (*self.shape).len(), 'axis out of dimensions');
let axis_shape = *(*self.shape).at(axis);
// Adjust indices that are negative
let mut adjusted_indices = array![];
let mut indices_data = indices.data.clone();
loop {
match indices_data.pop_front() {
Option::Some(index) => {
let adjusted_index: usize = if *index < 0 {
let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap();
val
} else {
let val: u32 = (*index).try_into().unwrap();
val
};
                // `adjusted_index` is a usize, so only the upper bound needs checking.
                assert(adjusted_index < axis_shape, 'Index out of bounds');
adjusted_indices.append(adjusted_index);
},
Option::None => { break; }
};
};
let mut output_data = array![];
let mut output_size = array![];
let mut self_shape = *self.shape;
let mut i: usize = 0;
loop {
match self_shape.pop_front() {
Option::Some(val) => {
if i == axis {
let mut indices_shape = indices.shape;
loop {
match indices_shape.pop_front() {
Option::Some(item) => { output_size.append(*item); },
Option::None => { break; }
};
};
} else {
output_size.append(*val);
}
i += 1;
},
Option::None => { break; }
};
};
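    // Precompute flattened strides: outer_loop_break is the product of the dims
    // before `axis`, divisor the product of the dims after it, and break_loop
    // the product of the dims from `axis` to the end.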
let mut outer_loop_break = 1;
let mut divisor = (*self.data).len();
let mut self_shape = *self.shape;
let mut i: usize = 0;
loop {
match self_shape.pop_front() {
Option::Some(val) => {
if i == axis {
divisor /= *val;
break ();
};
outer_loop_break *= *val;
divisor /= *val;
i += 1;
},
Option::None => { break; }
};
};
let mut break_loop: usize = 1;
let mut self_shape = *self.shape;
loop {
match self_shape.pop_back() {
Option::Some(val) => {
if self_shape.len() + 1 == axis {
break;
}
break_loop *= *val;
},
Option::None => { break; }
};
};
let mut outer_loop: usize = 0;
let axis_index = *self.shape[axis];
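    // For each outer slice, copy the elements whose axis coordinate (recovered
    // from the flattened offset by `inner_loop / divisor % axis_index`) matches
    // the gathered index.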
while outer_loop != outer_loop_break {
let mut adjusted_indices_iter = adjusted_indices.clone();
loop {
match adjusted_indices_iter.pop_front() {
Option::Some(indice) => {
let mut inner_loop = 0;
while inner_loop != break_loop {
let new_val = inner_loop / divisor % axis_index;
if indice == new_val {
output_data.append(*self.data[break_loop * outer_loop + inner_loop]);
}
inner_loop += 1;
}
},
Option::None => { break; },
};
};
outer_loop += 1;
};
    TensorTrait::<T>::new(output_size.span(), output_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/gather_elements.cairo | use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::gather_elements docstring
fn gather_elements<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<T> {
let axis: usize = match axis {
Option::Some(val) => {
if val < 0 {
(((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap()
} else {
val.try_into().unwrap()
}
},
Option::None => 0
};
assert(axis < (*self.shape).len(), 'axis out of dimensions');
let axis_shape = *(*self.shape).at(axis);
// Adjust indices that are negative
let mut adjusted_indices = array![];
let mut indices_data = indices.data.clone();
loop {
match indices_data.pop_front() {
Option::Some(index) => {
let adjusted_index: usize = if *index < 0 {
let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap();
val
} else {
let val: u32 = (*index).try_into().unwrap();
val
};
                // `adjusted_index` is a usize, so only the upper bound needs checking.
                assert(adjusted_index < axis_shape, 'Index out of bounds');
adjusted_indices.append(adjusted_index);
},
Option::None => { break; }
};
};
let mut output_data = array![];
let mut data_shape_clone = (*self.shape).clone();
let mut multiplier = 1;
let mut looper = 1;
let mut ind = 0;
loop {
match data_shape_clone.pop_front() {
Option::Some(val) => {
if ind >= axis {
multiplier *= *val;
}
if ind > axis {
looper *= *val;
}
ind += 1;
},
Option::None => { break; }
};
};
let inner_loop = multiplier / axis_shape;
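    // multiplier is the product of the dims from `axis` onward and looper the
    // product of the dims after it; the formulas below replace the axis
    // coordinate of each position in `indices` with the gathered index.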
let mut adjusted_indices_iter = adjusted_indices.clone();
let mut i: usize = 0;
loop {
match adjusted_indices_iter.pop_front() {
Option::Some(indice) => {
let value = if axis == 0 {
indice * inner_loop + (i % inner_loop)
} else if axis == (*self.shape).len() - 1 {
indice + axis_shape * (i / axis_shape)
} else {
indice * looper
+ (i % looper)
+ (multiplier / axis_shape) * (i / (multiplier / axis_shape))
};
output_data.append(*self.data[value]);
i += 1;
},
Option::None => { break; }
};
};
TensorTrait::<T>::new(indices.shape, output_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/gather_nd.cairo | use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::U32TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// Cf: TensorTrait::gather_nd docstring
fn gather_nd<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
self: @Tensor<T>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<T> {
let batch_dims = match batch_dims {
Option::Some(val) => val,
Option::None => 0
};
let data_rank = (*self.shape).len();
let indices_rank = (indices.shape).len();
    assert((data_rank >= 1) & (indices_rank >= 1), 'rank must be >= 1');
let mut data_shape = *self.shape;
let mut indices_shape = indices.shape;
let mut data_shape_clone = data_shape.clone();
let mut indices_shape_clone = indices_shape.clone();
let indices_shape_last = indices_shape_clone.pop_back().unwrap();
assert(
(*indices_shape_last >= 1) & (*indices_shape_last <= data_rank - batch_dims),
'check indices'
);
let mut batch_dims_shape = array![];
let mut output_shape = array![];
let mut index_data = array![];
let mut output_data = array![];
let mut batch_dims_size = batch_dims;
let mut total_data_len = 1;
let mut multiple_data_len = array![];
let mut ind = 0;
while ind != batch_dims {
match indices_shape_clone.pop_front() {
Option::Some(val) => {
batch_dims_size *= *val;
batch_dims_shape.append(*val);
ind += 1;
},
Option::None => { break; }
};
};
loop {
match indices_shape_clone.pop_front() {
Option::Some(val) => { batch_dims_shape.append(*val); },
Option::None => { break; }
};
};
if (*indices_shape_last == data_rank - batch_dims) {
output_shape = batch_dims_shape;
} else {
let mut ind = 0;
output_shape = batch_dims_shape;
loop {
match data_shape_clone.pop_front() {
Option::Some(val) => {
if (ind >= (batch_dims + *indices_shape_last)) {
output_shape.append(*val);
}
ind += 1;
},
Option::None => { break; }
};
};
}
let mut ind = 0;
let mut multiple = 1;
let mut incrementer = 1;
let mut data_shape_clone = data_shape.clone();
loop {
match data_shape_clone.pop_front() {
Option::Some(val) => {
if (ind >= batch_dims) {
multiple *= *val;
multiple_data_len.append(multiple);
}
if (ind >= batch_dims + *indices_shape_last) {
incrementer *= *val;
}
ind += 1;
},
Option::None => { break; }
};
};
let mut ind = 0;
let mut indices_shape_clone = indices_shape.clone();
let mut breaker = 1;
loop {
match indices_shape_clone.pop_front() {
Option::Some(val) => {
if (ind >= batch_dims) {
breaker *= *val;
}
ind += 1;
},
Option::None => { break; }
};
};
total_data_len = *multiple_data_len.at(multiple_data_len.len() - 1);
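    // total_data_len counts the elements in one batch of `data`, breaker the
    // index scalars per batch, and incrementer the slice length copied per
    // index tuple.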
let mut data_indices = indices.data;
let mut ind = 0;
let mut result = 0;
loop {
match data_indices.pop_front() {
Option::Some(val) => {
let index = ind % *indices_shape_last;
let incr = total_data_len * (ind / breaker);
result += (*val * total_data_len / *multiple_data_len.at(index));
ind += 1;
if (index == *indices_shape_last - 1) {
let mut data_ind: usize = result;
while data_ind != result + incrementer {
index_data.append(data_ind + incr);
data_ind += 1;
};
result = 0;
};
},
Option::None => { break; }
};
};
loop {
match index_data.pop_front() {
Option::Some(val) => { output_data.append(*self.data[val]); },
Option::None => { break; }
};
};
    TensorTrait::<T>::new(output_shape.span(), output_data.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/greater.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::greater docstring
fn greater<
T,
impl UsizeFTensor: TensorTrait<usize>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<usize> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
if *(*y.data)[indices_self] > *(*z.data)[indices_other] {
result.append(1);
} else {
result.append(0);
}
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/greater_equal.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::greater_equal docstring
fn greater_equal<
T,
impl UsizeFTensor: TensorTrait<usize>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<usize> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
if *(*y.data)[indices_self] >= *(*z.data)[indices_other] {
result.append(1);
} else {
result.append(0);
}
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/hamming_window.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
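/// Cf: TensorTrait::hamming_window docstring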
fn hamming_window<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAdd: Add<T>,
impl TSub: Sub<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
let start: T = NumberTrait::zero();
let one_step: T = NumberTrait::one();
let two: T = one_step + one_step;
let three: T = two + one_step;
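    // 25 = 3^3 - 2 and 46 = 25 * 2 - 2 * 2, built from T's own one and two, so
    // alpha = 25/46 ≈ 0.5435 and beta = 1 - alpha are the Hamming coefficients.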
let n25: T = three.pow(three) - two;
let n46: T = n25 * two - two * two;
let alpha: T = n25 / n46;
let beta: T = one_step - alpha;
let ni = TensorTrait::range(start, size, one_step);
    assert((ni.shape).len() == 1, 'expected a 1-D range');
let mut N_1 = size;
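    // Symmetric windows (the default) divide by N - 1; periodic ones by N.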
if periodic != Option::Some(1) {
N_1 = N_1 - one_step;
};
let len = *(ni.shape).at(0);
let mut arr: Array<T> = array![];
let mut i: usize = 0;
while i != len {
let v = *(ni.data).at(i);
let r = v * PI * two / N_1;
arr.append(r);
i += 1;
};
let window = TensorTrait::<T>::new(ni.shape, arr.span());
let window_cos = window.cos();
let len2 = *(ni.shape).at(0);
let mut arr2: Array<T> = array![];
let mut j: usize = 0;
while j != len2 {
let v = *(window_cos.data).at(j);
let v_2 = alpha - v * beta;
arr2.append(v_2);
j += 1;
};
let window_cos_2 = TensorTrait::<T>::new(ni.shape, arr2.span());
window_cos_2
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/hann_window.cairo | use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
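/// Cf: TensorTrait::hann_window docstring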
fn hann_window<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAdd: Add<T>,
impl TSub: Sub<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
let start: T = NumberTrait::zero();
let one_step: T = NumberTrait::one();
let ni = TensorTrait::range(start, size, one_step);
    assert((ni.shape).len() == 1, 'expected a 1-D range');
let mut N_1 = size;
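    // Symmetric windows (the default) divide by N - 1; periodic ones by N.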
if periodic != Option::Some(1) {
N_1 = N_1 - one_step;
};
let len = *(ni.shape).at(0);
let mut arr: Array<T> = array![];
let mut i: usize = 0;
while i != len {
let v = *(ni.data).at(i);
let r = v * PI / N_1;
arr.append(r);
i += 1;
};
let window = TensorTrait::<T>::new(ni.shape, arr.span());
let window_sin = window.sin();
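    // Squaring gives the Hann window: sin^2(x) = 0.5 * (1 - cos(2 * x)).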
let len2 = *(ni.shape).at(0);
let mut arr2: Array<T> = array![];
let mut j: usize = 0;
while j != len2 {
let v = *(window_sin.data).at(j);
let v_2 = v * v;
arr2.append(v_2);
j += 1;
};
let window_sin_2 = TensorTrait::<T>::new(ni.shape, arr2.span());
window_sin_2
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/is_inf.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
/// Cf: TensorTrait::is_inf docstring
fn is_inf<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
x: @Tensor<T>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
    let neg_opt = match detect_negative {
        Option::Some(val) => { if val == 0 { 0 } else { 1 } },
        Option::None => 1,
    };
    let pos_opt = match detect_positive {
        Option::Some(val) => { if val == 0 { 0 } else { 1 } },
        Option::None => 1,
    };
    if neg_opt == 0 && pos_opt == 0 {
        // Neither kind of infinity is detected: every element maps to false.
        // (An empty data span would not match `x`'s shape in TensorTrait::new.)
        let mut data_result: Array<bool> = array![];
        let mut i = (*x.data).len();
        while i != 0 {
            data_result.append(false);
            i -= 1;
        };
        return TensorTrait::new(*x.shape, data_result.span());
    }
if neg_opt == 0 && pos_opt == 1 {
return is_pos_inf(x);
}
if neg_opt == 1 && pos_opt == 0 {
return is_neg_inf(x);
}
let mut data_result: Array<bool> = array![];
let mut y: Span<T> = *x.data;
loop {
match y.pop_front() {
Option::Some(item) => { data_result.append((*item).is_inf()); },
Option::None => { break; }
};
};
TensorTrait::new(*x.shape, data_result.span())
}
/// Cf: TensorTrait::is_pos_inf docstring
fn is_pos_inf<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
x: @Tensor<T>
) -> Tensor<bool> {
let mut data_result: Array<bool> = array![];
let mut y: Span<T> = *x.data;
loop {
match y.pop_front() {
Option::Some(item) => { data_result.append((*item).is_pos_inf()); },
Option::None => { break; }
};
};
TensorTrait::new(*x.shape, data_result.span())
}
/// Cf: TensorTrait::is_neg_inf docstring
fn is_neg_inf<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
x: @Tensor<T>
) -> Tensor<bool> {
let mut data_result: Array<bool> = array![];
let mut y: Span<T> = *x.data;
loop {
match y.pop_front() {
Option::Some(item) => { data_result.append((*item).is_neg_inf()); },
Option::None => { break; }
};
};
TensorTrait::new(*x.shape, data_result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/is_nan.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
/// Cf: TensorTrait::is_nan docstring
fn is_nan<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
x: @Tensor<T>
) -> Tensor<bool> {
let mut data_result: Array<bool> = array![];
let mut y: Span<T> = *x.data;
loop {
match y.pop_front() {
Option::Some(item) => { data_result.append((*item).is_nan()); },
Option::None => { break; }
};
};
TensorTrait::new(*x.shape, data_result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/layer_normalization.cairo | use core::option::OptionTrait;
use core::traits::TryInto;
use orion::numbers::{NumberTrait, I32IntoU32};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
/// Cf: TensorTrait::layer_normalization docstring
fn layer_normalization<
T,
MAG,
+TensorTrait<T>,
+NumberTrait<T, MAG>,
+PartialEq<T>,
+Copy<T>,
+Drop<T>,
+Div<Tensor<T>>,
+Sub<Tensor<T>>,
+Add<Tensor<T>>,
+Mul<Tensor<T>>,
+Into<usize, MAG>,
>(
self: @Tensor<T>,
scale: @Tensor<T>,
B: Option<@Tensor<T>>,
axis: Option<i32>,
epsilon: Option<T>,
stash_type: Option<usize>,
) -> (Tensor<T>, Tensor<T>, Tensor<T>) {
let X_rank = (*self).shape.len();
let mut axis = match axis {
Option::Some(axis) => axis,
Option::None => -1,
};
let epsilon = match epsilon {
Option::Some(epsilon) => epsilon,
        Option::None => NumberTrait::zero(), // note: ONNX defaults to 1e-05, but zero is used here
};
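    // A negative axis counts from the back; this subtraction assumes I32IntoU32
    // maps a negative i32 to its magnitude, yielding X_rank - |axis|.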
let axis = if axis < 0 {
X_rank - axis.into()
} else {
axis.into()
};
let unsqueezed_rank = X_rank - axis;
let mut reduction_shape = array![];
let mut i = 0;
while i != axis {
reduction_shape.append(*(*self).shape.at(i));
i += 1;
};
let mut i = 0;
while i != unsqueezed_rank {
reduction_shape.append(1);
i += 1;
};
let mut row_number = 1;
let mut col_number = 1;
let mut i = 0;
while i != X_rank {
if i < axis {
row_number *= *(*self).shape.at(i);
} else {
col_number *= *(*self).shape.at(i);
}
i += 1;
};
let mut shape_matrix = array![];
shape_matrix.append(row_number.try_into().unwrap());
shape_matrix.append(col_number.try_into().unwrap());
    // Shape [1, 1] to multiply one-element tensors with 2D matrices
let mut shape_one = array![];
shape_one.append(1);
shape_one.append(1);
let mut col_number_tensor = array![];
col_number_tensor.append(NumberTrait::new_unscaled(col_number.into(), false));
let mut epsilon_tensor = array![];
epsilon_tensor.append(epsilon);
let mut one_tensor = array![];
one_tensor.append(NumberTrait::one());
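    // View the input as a [row_number, col_number] matrix and normalize each
    // row: y = (x - mean(x)) / sqrt(var(x) + epsilon), then apply scale and B.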
let x_mat = self.reshape(shape_matrix.span(), false);
let x_mean = x_mat
.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false))
/ TensorTrait::new(shape_one.span(), col_number_tensor.span());
let x_diff = x_mat - x_mean;
let x_squared_diff = x_diff * x_diff;
let variance = x_squared_diff.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false))
/ TensorTrait::new(shape_one.span(), col_number_tensor.span());
let variance_eps = variance + TensorTrait::new(shape_one.span(), epsilon_tensor.span());
let std_dev = variance_eps.sqrt();
let inv_std_dev = TensorTrait::new(shape_one.span(), one_tensor.span()) / std_dev;
let y_mat = x_diff * inv_std_dev;
let scale = if (*scale).shape.len() < (*self).shape.len() {
        // Prepend 1s to the scale shape so it broadcasts against Y for multiplication
let mut shape = array![];
let mut i = 0;
while i != (*self).shape.len() - (*scale).shape.len() {
shape.append(1);
i += 1;
};
let mut i = 0;
while i != (*scale).shape.len() {
shape.append(*(*scale).shape.at(i));
i += 1;
};
TensorTrait::new(shape.span(), (*scale).data)
} else {
*scale
};
let mut i = 0;
let mut target_shape: Array<i32> = array![];
    while i < (*self).shape.len() {
        target_shape.append((*(*self).shape.at(i)).try_into().unwrap());
        i += 1;
    };
let Y = y_mat.reshape(target_shape.span(), false) * scale;
let Y = match B {
Option::Some(B) => {
let B = if (*B).shape.len() < (*self).shape.len() {
            // Prepend 1s to the B shape so it broadcasts against Y for addition
let mut shape = array![];
let mut i = 0;
while i != (*self).shape.len() - (*B).shape.len() {
shape.append(1);
i += 1;
};
let mut i = 0;
while i != (*B).shape.len() {
shape.append(*(*B).shape.at(i));
i += 1;
};
TensorTrait::new(shape.span(), (*B).data)
} else {
*B
};
Y + B
},
Option::None => Y,
};
let X_mean = TensorTrait::new(reduction_shape.span(), x_mean.data);
let X_inv_std_dev = TensorTrait::new(reduction_shape.span(), inv_std_dev.data);
(Y, X_mean, X_inv_std_dev)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/less.cairo | use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::less docstring
fn less<T, impl TPartialOrd: PartialOrd<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<i32> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<i32> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
if *(*y.data)[indices_self] < *(*z.data)[indices_other] {
result.append(1);
} else {
result.append(0);
}
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |