file_path
stringlengths 7
180
| content
stringlengths 0
811k
| repo
stringclasses 11
values |
---|---|---|
src/operators/tensor/math/less_equal.cairo | use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::less_equal docstring
fn less_equal<
    T,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<i32> {
    // Shape both operands broadcast to; panics if the shapes are incompatible.
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<i32> = array![];
    let mut i: usize = 0;
    loop {
        if i == total {
            break;
        }
        // Map the flat output index back to a flat index into each operand.
        let coords = unravel_index(i, out_shape);
        let lhs_idx = broadcast_index_mapping(*y.shape, coords);
        let rhs_idx = broadcast_index_mapping(*z.shape, coords);
        // 1 where y <= z, 0 otherwise.
        if *(*y.data)[lhs_idx] <= *(*z.data)[rhs_idx] {
            flags.append(1);
        } else {
            flags.append(0);
        }
        i += 1;
    };
    TensorTrait::new(out_shape, flags.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/log.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::log docstring
fn log<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise natural logarithm; the input shape is carried over unchanged.
    let mut mapped: Array<T> = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(elem) => {
                let value = *elem;
                mapped.append(value.ln());
            },
            Option::None => { break; }
        };
    };
    TensorTrait::<T>::new(self.shape, mapped.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/max.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::max docstring
///
/// Element-wise maximum over a span of tensors, with pairwise NumPy-style
/// broadcasting: the running result is broadcast against each subsequent
/// tensor in turn.
fn max<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    tensors: Span<Tensor<T>>
) -> Tensor<T> {
    // At least one input is required; a single input is returned unchanged.
    assert(tensors.len() >= 1, 'Input tensors must be >= 1');
    let first_tensor = *tensors.at(0);
    if tensors.len() == 1 {
        return first_tensor;
    }
    // Running maximum, folded over the remaining tensors one at a time.
    let mut max_shape: Span<usize> = first_tensor.shape;
    let mut max_data: Span<T> = first_tensor.data;
    let mut tensor_counter: usize = 1;
    while tensor_counter != tensors.len() {
        let mut new_max_data: Array<T> = array![];
        let mut current_tensor = *tensors.at(tensor_counter);
        // Shape the running result and the current tensor broadcast to;
        // panics if the shapes are incompatible.
        let mut broadcasted_shape = broadcast_shape(max_shape, current_tensor.shape);
        let num_elements = len_from_shape(broadcasted_shape);
        let mut n: usize = 0;
        while n != num_elements {
            // Map the flat output index back to a flat index into each operand.
            let mut indices_broadcasted = unravel_index(n, broadcasted_shape);
            let mut indices_self = broadcast_index_mapping(max_shape, indices_broadcasted);
            let mut indices_other = broadcast_index_mapping(
                current_tensor.shape, indices_broadcasted
            );
            let mut max_value = NumberTrait::max(
                *(max_data)[indices_self], *(current_tensor.data)[indices_other]
            );
            new_max_data.append(max_value);
            n += 1;
        };
        // The broadcasted result becomes the running operand for the next fold.
        max_shape = broadcasted_shape;
        max_data = new_max_data.span();
        tensor_counter += 1;
    };
    TensorTrait::<T>::new(max_shape, max_data)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/max_in_tensor.cairo | use orion::numbers::NumberTrait;
/// Cf: TensorTrait::max_in_tensor docstring
fn max_in_tensor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut vec: Span::<T>
) -> T {
    // Fold the span, starting from the smallest representable value so any
    // element can replace it. An empty span yields min_value(), matching the
    // original behavior.
    let mut best: T = NumberTrait::min_value();
    loop {
        match vec.pop_front() {
            Option::Some(elem) => { best = best.max(*elem); },
            Option::None => { break; }
        };
    };
    best
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/min.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::min docstring
///
/// Element-wise minimum over a span of tensors, with pairwise NumPy-style
/// broadcasting: the running result is broadcast against each subsequent
/// tensor in turn.
fn min<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    tensors: Span<Tensor<T>>
) -> Tensor<T> {
    // At least one input is required; a single input is returned unchanged.
    assert(tensors.len() >= 1, 'Input tensors must be >= 1');
    let first_tensor = *tensors.at(0);
    if tensors.len() == 1 {
        return first_tensor;
    }
    // Running minimum, folded over the remaining tensors one at a time.
    let mut min_shape: Span<usize> = first_tensor.shape;
    let mut min_data: Span<T> = first_tensor.data;
    let mut tensor_counter: usize = 1;
    while tensor_counter != tensors.len() {
        let mut new_min_data: Array<T> = array![];
        let mut current_tensor = *tensors.at(tensor_counter);
        // Shape the running result and the current tensor broadcast to;
        // panics if the shapes are incompatible.
        let mut broadcasted_shape = broadcast_shape(min_shape, current_tensor.shape);
        let num_elements = len_from_shape(broadcasted_shape);
        let mut n: usize = 0;
        while n != num_elements {
            // Map the flat output index back to a flat index into each operand.
            let mut indices_broadcasted = unravel_index(n, broadcasted_shape);
            let mut indices_self = broadcast_index_mapping(min_shape, indices_broadcasted);
            let mut indices_other = broadcast_index_mapping(
                current_tensor.shape, indices_broadcasted
            );
            let mut min_value = NumberTrait::min(
                *(min_data)[indices_self], *(current_tensor.data)[indices_other]
            );
            new_min_data.append(min_value);
            n += 1;
        };
        // The broadcasted result becomes the running operand for the next fold.
        min_shape = broadcasted_shape;
        min_data = new_min_data.span();
        tensor_counter += 1;
    };
    TensorTrait::<T>::new(min_shape, min_data)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/min_in_tensor.cairo | use orion::numbers::NumberTrait;
/// Cf: TensorTrait::min_in_tensor docstring
fn min_in_tensor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut vec: Span::<T>
) -> T {
    // Fold the span, starting from the largest representable value so any
    // element can replace it. An empty span yields max_value(), matching the
    // original behavior.
    let mut best: T = NumberTrait::max_value();
    loop {
        match vec.pop_front() {
            Option::Some(elem) => { best = best.min(*elem); },
            Option::None => { break; }
        };
    };
    best
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/neg.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::neg docstring
fn neg<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut z: Tensor<T>
) -> Tensor<T> {
    // Element-wise additive inverse; the shape is preserved.
    let mut negated: Array<T> = array![];
    loop {
        match z.data.pop_front() {
            Option::Some(elem) => {
                let value = *elem;
                negated.append(value.neg());
            },
            Option::None => { break; }
        };
    };
    TensorTrait::<T>::new(z.shape, negated.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/not.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::{tensor_bool::BoolTensor};
// Cf TensorTrait::not docstring
fn not(mut z: Tensor<bool>) -> Tensor<bool> {
    // Element-wise logical negation over a boolean tensor.
    let mut flipped: Array<bool> = array![];
    loop {
        match z.data.pop_front() {
            Option::Some(elem) => {
                let value: bool = *elem;
                flipped.append(!value);
            },
            Option::None => { break; }
        };
    };
    TensorTrait::new(z.shape, flipped.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/onehot.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::onehot docstring
///
/// Expands each element of `self` into a length-`depth` one-hot row, using
/// `values[1]` at the matching position and `values[0]` elsewhere. The new
/// `depth` axis is appended last, then transposed into position `axis` when
/// a non-last axis was requested.
fn onehot_encode<
    T,
    MAG,
    impl FFixed: FixedTrait<T, MAG>,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl U32TryIntoMAG: TryInto<u32, MAG>,
    impl FPartialEq: PartialEq<T>,
    impl FAdd: Add<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>, depth: usize, axis: Option<usize>, values: Tensor<T>
) -> Tensor<T> {
    let mut data = *self.data;
    let mut shape = *self.shape;
    let rank = shape.len();
    // using 999 to denote -1, innermost dimension: the Option::None case
    // ("last axis") is encoded with the sentinel value 999.
    let axis = match axis {
        Option::Some(val) => val,
        Option::None => 999
    };
    // NOTE(review): `axis.into() <= rank` also accepts axis == rank (treated
    // below as "last dimension") — confirm whether the bound should be strict.
    assert(((axis == 999) | (axis.into() <= rank)), 'axis out of dimensions');
    let mut output_data = array![];
    let mut output_size: Array<usize> = array![];
    // New shape for output data: the input shape with a trailing `depth` axis.
    loop {
        match shape.pop_front() {
            Option::Some(size) => { output_size.append(*size); },
            Option::None => { break; }
        };
    };
    output_size.append(depth.into());
    // OneHot encode loop: each input element selects which of the `depth`
    // slots receives the "on" value.
    loop {
        match data.pop_front() {
            Option::Some(outer_index) => {
                let mut fixed_number = *outer_index;
                // Negative indices count from the end: index + depth.
                if fixed_number.is_neg() {
                    fixed_number =
                        FixedTrait::<T, MAG>::new_unscaled(depth.try_into().unwrap(), false)
                        + fixed_number
                }
                let mut inner_index = 0;
                while inner_index != depth {
                    let ind = FixedTrait::<
                        T, MAG
                    >::new_unscaled(inner_index.try_into().unwrap(), false);
                    // values.data[1] is the "on" value, values.data[0] the "off" value.
                    if fixed_number == ind {
                        output_data.append(*values.data.at(1));
                    } else {
                        output_data.append(*values.data.at(0));
                    };
                    inner_index += 1;
                };
            },
            Option::None => { break; }
        };
    };
    let mut output_tensor = TensorTrait::new(output_size.span(), output_data.span());
    let mut tranpose_axes = array![];
    // Compute a transpose permutation when `axis` is not the last dimension:
    // the trailing depth axis is moved into position `axis`.
    if (axis != 999) & (axis.into() != rank) {
        let mut index: usize = 0;
        loop {
            let max_dim = output_size.len() - 1;
            if index.into() == max_dim {
                break ();
            };
            // Insert the (last) depth axis just before the requested position.
            if axis == index {
                tranpose_axes.append(max_dim.into())
            }
            tranpose_axes.append(index.into());
            index += 1;
        };
        output_tensor = output_tensor.transpose(tranpose_axes.span());
    }
    output_tensor
}
/// Cf: TensorTrait::onehot docstring
///
/// Thin wrapper over `onehot_encode`: lifts the two scalar
/// [off_value, on_value] entries into the fixed-point domain of T.
fn onehot<
    T,
    MAG,
    impl FFixed: FixedTrait<T, MAG>,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl U32TryIntoMAG: TryInto<u32, MAG>,
    impl FPartialEq: PartialEq<T>,
    impl FAdd: Add<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>, depth: usize, axis: Option<usize>, mut values: Span<usize>,
) -> Tensor<T> {
    // Exactly two scalars are expected: [off_value, on_value].
    assert(values.len() == 2, 'Wrong values dimensions');
    let off_value = *values.pop_front().unwrap();
    let on_value = *values.pop_front().unwrap();
    let values_data: Array<T> = array![
        FixedTrait::<T, MAG>::new_unscaled(off_value.try_into().unwrap(), false),
        FixedTrait::<T, MAG>::new_unscaled(on_value.try_into().unwrap(), false)
    ];
    let values_shape: Array<usize> = array![2];
    let values_tensor = TensorTrait::new(values_shape.span(), values_data.span());
    onehot_encode(self, depth, axis, values_tensor)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/optional_get_element.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::optional_get_element docstring
fn optional_get_element<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut z: Tensor<T>, index: usize
) -> Tensor<T> {
    // Bounds-checked lookup: an in-range index yields a one-element data
    // array, an out-of-range index an empty one. The original shape is kept
    // either way, as in the original implementation.
    let mut picked: Array<T> = array![];
    match z.data.get(index) {
        Option::Some(boxed) => { picked.append(*boxed.unbox()); },
        Option::None => {}
    };
    TensorTrait::<T>::new(z.shape, picked.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/or.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::or docstring
///
/// Element-wise logical OR with broadcasting; returns 1/0 flags as usize.
fn or<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    // Shape both operands broadcast to; panics if incompatible.
    let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
    let mut result: Array<usize> = array![];
    let num_elements = len_from_shape(broadcasted_shape);
    let mut n: usize = 0;
    while n != num_elements {
        // Map the flat output index back to a flat index into each operand.
        let indices_broadcasted = unravel_index(n, broadcasted_shape);
        let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
        let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
        // 1 where NumberTrait::or(y, z) holds, 0 otherwise.
        if NumberTrait::or(*(*y.data)[indices_self], *(*z.data)[indices_other]) {
            result.append(1);
        } else {
            result.append(0);
        }
        n += 1;
    };
    TensorTrait::new(broadcasted_shape, result.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/pow.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape};
/// Cf: TensorTrait::pow docstring
fn pow<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
    // Broadcast both operands to a common shape, then exponentiate
    // element-wise: out = y ^ z.
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut out: Array<T> = array![];
    let mut i: usize = 0;
    loop {
        if i == total {
            break;
        }
        // Map the flat output index back to a flat index into each operand.
        let coords = unravel_index(i, out_shape);
        let base_idx = broadcast_index_mapping(*y.shape, coords);
        let exp_idx = broadcast_index_mapping(*z.shape, coords);
        out.append(NumberTrait::pow(*(*y.data)[base_idx], *(*z.data)[exp_idx]));
        i += 1;
    };
    TensorTrait::new(out_shape, out.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/random_uniform_like.cairo | use core::integer;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
/// Cf: TensorTrait::random_uniform_like docstring
fn random_uniform_like<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    tensor: Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>
) -> Tensor<T> {
    // Resolve defaults: the seed falls back to the maximum usize value, the
    // sampling range falls back to [zero, one].
    let seed: usize = match seed {
        Option::Some(s) => s,
        Option::None => NumberTrait::max_value(),
    };
    let high = match high {
        Option::Some(h) => h,
        Option::None => NumberTrait::one(),
    };
    let low = match low {
        Option::Some(l) => l,
        Option::None => NumberTrait::zero(),
    };
    assert!(high > low, "high must be larger than low");
    tensor_get_state(tensor, seed, high, low)
}
/// Fills a tensor-shaped buffer with pseudo-random values in the
/// [low, high] range.
///
/// One value is drawn per element via `hash_random_range`; between draws the
/// seed is advanced with a linear congruential step so successive elements
/// hash differently.
///
/// # Arguments
/// * `tensor` - Supplies the shape and element count of the output.
/// * `seed` - The initial seed for the hash / LCG sequence.
/// * `high` / `low` - Bounds of the sampled range (see `hash_random_range`).
///
/// # Returns
/// * A new tensor with `tensor.shape` and freshly sampled data.
fn tensor_get_state<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    tensor: Tensor<T>, mut seed: usize, high: T, low: T
) -> Tensor<T> {
    // Numerical-Recipes-style LCG constants; loop-invariant, so hoisted out
    // of the per-element loop (they were rebuilt on every iteration before).
    // NOTE(review): the modulus is 2^32 - 1, not 2^32 as the classic LCG
    // uses — kept as-is to preserve the existing value stream; confirm intent.
    let a: u64 = 1664525;
    let c: u64 = 1013904223;
    let m: u64 = 4294967295;
    let count = (tensor.data).len();
    let mut data = array![];
    let mut i = 0;
    while count != i {
        // Draw this element from the current seed. (The dead
        // `let mut v = NumberTrait::one()` initialization was removed — it
        // was overwritten immediately.)
        data.append(hash_random_range(seed, low, high));
        // Advance the seed for the next element: seed = (a*seed + c) mod m.
        let s: u64 = (a * seed.try_into().unwrap() + c) % m;
        seed = s.try_into().unwrap();
        i += 1;
    };
    TensorTrait::new(tensor.shape, data.span())
}
// High level random in a range
// Only one random number per hash might be inefficient.
/// Derives one pseudo-random value in [min, max] from `seed` by hashing the
/// seed with Pedersen and reducing the hash into the requested range.
fn hash_random_range<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    seed: usize, min: T, max: T
) -> T {
    let mut key = PedersenHasherImpl::new();
    let hash: felt252 = key.hash(seed.into(), 1);
    // Fold the felt252 hash down to its low u128 limb, then reduce modulo a.
    // NOTE(review): a is 2^32 - 1 rather than 2^32 — confirm intent.
    let a: u128 = 4294967295;
    let b: u128 = match integer::u128s_from_felt252(hash) {
        integer::U128sFromFelt252Result::Narrow(x) => x,
        integer::U128sFromFelt252Result::Wide((x, _)) => x,
    } % a;
    let c: felt252 = b.into();
    let rnd: T = NumberTrait::from_felt(c);
    // NOTE(review): for fixed-point T, `+ one()` widens the range by 1.0
    // rather than one representable step — verify this matches the intended
    // inclusive upper bound.
    let range = max - min + NumberTrait::one(); // + 1 to include max
    min + rnd % range
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/range.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::range docstring
///
/// Emits start, start+step, ... while the value has not yet reached `end`
/// (exclusive), for either sign of `step`. A zero or wrong-sign step yields
/// an empty tensor.
fn range<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut start: T, end: T, step: T
) -> Tensor<T> {
    let zero: T = NumberTrait::zero();
    let mut values: Array<T> = array![];
    loop {
        // Stop once `start` has reached or passed `end` in the direction of
        // travel (De Morgan of the original `while` condition).
        let done_ascending = step >= zero && start >= end;
        let done_descending = step <= zero && start <= end;
        if done_ascending || done_descending {
            break;
        }
        values.append(start);
        start += step;
    };
    let out_shape: Array<usize> = array![values.len()];
    TensorTrait::<T>::new(out_shape.span(), values.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_l1.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::reduce_l1 docstring
fn reduce_l1<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // L1 norm along `axis`: the sum of absolute values.
    let absolute = self.abs();
    absolute
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        )
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_l2.cairo | use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Element-wise square: returns a tensor holding x * x for every element of
/// `self`, with the shape unchanged.
fn square<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>
) -> Tensor<T> {
    let mut remaining = *self.data;
    let mut squared: Array<T> = array![];
    loop {
        match remaining.pop_front() {
            Option::Some(item) => {
                let value = *item;
                squared.append(value * value);
            },
            Option::None => { break; }
        };
    };
    TensorTrait::new(*self.shape, squared.span())
}
/// Cf: TensorTrait::reduce_l2 docstring
fn reduce_l2<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // L2 norm along `axis`: sqrt(sum(x * x)).
    let summed_squares = square(self)
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        );
    summed_squares.sqrt()
}
/// Variant of reduce_l2 that squares the magnitudes (abs) of the elements
/// before summing, for element types where x * x alone is not the squared
/// magnitude.
fn reduce_l2_complex<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TPrint: PrintTrait<T>
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    let magnitudes = self.abs();
    let summed = square(@magnitudes)
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        );
    summed.sqrt()
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_log_sum.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::reduce_log_sum docstring
fn reduce_log_sum<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // log(sum(x)) along `axis`: reduce first, then take the natural log.
    let summed = self
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        );
    summed.log()
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_log_sum_exp.cairo | use core::option::OptionTrait;
use core::array::ArrayTrait;
use core::array::SpanTrait;
use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: TensorTrait::reduce_log_sum_exp docstring
// fn reduce_log_sum_exp_wide<
// T,
// TMAG,
// W,
// WMAG,
// impl TIntoW: Into<T, W>,
// impl WTryIntoT: TryInto<W, T>,
// impl WCopy: Copy<W>,
// impl WDrop: Drop<W>,
// impl TCopy: Copy<T>,
// impl TDrop: Drop<T>,
// impl TDiv: Div<T>,
// impl TTensor: TensorTrait<T>,
// impl WTensor: TensorTrait<W>,
// impl TFixed: FixedTrait<T, TMAG>,
// impl WFixed: FixedTrait<W, WMAG>
// >(
// self: @Tensor<T>, axis: usize, keepdims: bool
// ) -> Tensor<W> {
// let tensor_exp: Tensor<W> = exp_upcast(*self);
// let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims);
// return tensor_exp_log_sum;
// }
/// Cf: TensorTrait::reduce_log_sum_exp docstring
fn reduce_log_sum_exp<
    T,
    MAG,
    impl Tensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // log(sum(exp(x))) along `axis`, composed from the existing exp and
    // reduce_log_sum kernels.
    let exponentiated = self.exp();
    exponentiated.reduce_log_sum(axis: axis, keepdims: keepdims)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_mean.cairo | use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_mean docstring
///
/// Mean-reduction over one or more axes, applied one axis at a time.
fn reduce_mean<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TDiv: Div<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<usize>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // noop_with_empty_axes defaults to false.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    // Normalize `axes`: empty means "all axes"; missing means "all axes"
    // unless the no-op flag is set (then the input is returned unchanged).
    // Explicit axes are checked for duplicates and sorted ascending.
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr = array![];
                let mut copy_axes = axes;
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => { axes_arr.append(*axis); },
                        Option::None => { break; }
                    };
                };
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    // keepdims defaults to true.
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };
    // Reduce one axis at a time; `axis_c` counts axes already removed so the
    // remaining (sorted, original-space) axis numbers can be shifted down.
    let mut axis_c = 0;
    let mut copy_axes = axes;
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Down to a vector: mean of everything, scalar result.
                    let current_mean = accumulate_mean::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_mean].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_mean = accumulate_mean::<T>(data, shape, indices, *axis - axis_c);
                    temp_data.append(current_mean);
                    index += 1;
                };
                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };
    // With keepdims, rebuild the shape from the original with every reduced
    // axis re-inserted at size 1.
    let mut axes_copy = axes;
    if keepdims {
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Helper function that accumulates the mean of elements along a specific axis.
///
/// # Arguments
/// * `input_data` - The input's data.
/// * `input_shape` - The input's shape.
/// * `output_indices` - A span of output indices.
/// * `axis` - The axis along which to accumulate the mean.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A value representing the accumulated mean along the specified axis.
fn accumulate_mean<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TDiv: Div<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut acc: T = NumberTrait::zero();
    // `axis_index` mirrors the element count in the domain of T, so the final
    // division needs no usize -> T conversion; `axis_indexu32` drives the loop.
    let mut axis_index: T = NumberTrait::zero();
    let mut axis_indexu32 = 0;
    if (input_shape).len() > 1 {
        // Multi-dimensional: sum along `axis` at the fixed output position.
        while axis_indexu32 != axis_len {
            let input_indices = combine_indices(output_indices, axis_indexu32, axis);
            let input_index = ravel_index(input_shape, input_indices);
            let ele = *(input_data)[input_index];
            acc += ele;
            axis_index += NumberTrait::one();
            axis_indexu32 += 1;
        };
    } else {
        // One-dimensional: sum every element of the span.
        // NOTE(review): the division below is by zero if `input_data` is
        // empty — callers are assumed to pass non-empty data; confirm.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => {
                    acc += *item;
                    axis_index += NumberTrait::one();
                    axis_indexu32 += 1;
                },
                Option::None => { break; }
            };
        };
    }
    // Mean = accumulated sum divided by the (T-domain) element count.
    acc / axis_index
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_min.cairo | use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_min docstring
///
/// Min-reduction over one or more axes, applied one axis at a time.
fn reduce_min<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<usize>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // noop_with_empty_axes defaults to false.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    // Normalize `axes`: empty means "all axes"; missing means "all axes"
    // unless the no-op flag is set (then the input is returned unchanged).
    // Explicit axes are checked for duplicates and sorted ascending.
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr: Array<usize> = array![];
                let mut copy_axes = axes;
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => { axes_arr.append(*axis); },
                        Option::None => { break; }
                    };
                };
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    // keepdims defaults to true.
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };
    // Reduce one axis at a time; `axis_c` counts axes already removed so the
    // remaining (sorted, original-space) axis numbers can be shifted down.
    let mut axis_c = 0;
    let mut copy_axes = axes;
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Down to a vector: minimum of everything, scalar result.
                    let current_min = accumulate_min::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_min].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_min = accumulate_min::<T>(data, shape, indices, *axis - axis_c);
                    temp_data.append(current_min);
                    index += 1;
                };
                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };
    // With keepdims, rebuild the shape from the original with every reduced
    // axis re-inserted at size 1.
    let mut axes_copy = axes;
    if keepdims {
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Helper function that accumulates the minimum of elements along a specific axis.
///
/// # Arguments
/// * `input_data` - The input's data.
/// * `input_shape` - The input's shape.
/// * `output_indices` - A span of output indices.
/// * `axis` - The axis along which to accumulate the minimum.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A value representing the accumulated minimum along the specified axis.
fn accumulate_min<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    // Start from the largest representable value so any element can lower it.
    let mut smallest: T = NumberTrait::max_value();
    if input_shape.len() > 1 {
        // Multi-dimensional: walk the `axis` dimension at the fixed
        // `output_indices` position.
        let mut step = 0;
        while step != axis_len {
            let probe = combine_indices(output_indices, step, axis);
            let flat = ravel_index(input_shape, probe);
            smallest = smallest.min(*(input_data)[flat]);
            step += 1;
        };
    } else {
        // One-dimensional: fold the whole span.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => { smallest = smallest.min(*item); },
                Option::None => { break; }
            };
        };
    }
    smallest
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_prod.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
/// reduce_prod - Reduces a tensor to its products along specified axis.
///
/// Performs the reduction operation akin to ONNX's 'ReduceProd': computes the
/// product of elements of `self` along `axis`. If the tensor is
/// one-dimensional, the axis must be 0; for multi-dimensional tensors `axis`
/// selects the dimension that is reduced.
///
/// # Arguments
/// * `self` - The input tensor on which the reduction is applied.
/// * `axis` - The axis along which the reduction is performed.
/// * `keepdims` - Whether the reduced dimension is retained (with size 1) in
///   the output shape; otherwise it is removed.
///
/// # Returns
/// * A new tensor holding the accumulated products.
///
/// # Panics
/// * Panics if `axis` is out of the tensor's dimensions.
///
/// # See Also
/// * ONNX 'ReduceProd': https://github.com/onnx/onnx/blob/main/docs/Operators.md#ReduceProd
fn reduce_prod<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    let mut output_data = array![];
    if (*self.shape).len() == 1 {
        // 1-D input: only axis 0 is meaningful; the result is the product of
        // every element, with shape [1].
        assert(axis == 0, 'axis out of dimensions');
        let current_prod = accumulate_production::<T>(*self.data, *self.shape, *self.shape, axis);
        output_data.append(current_prod);
        let mut output_shape = array![];
        output_shape.append(1);
        return TensorTrait::new(output_shape.span(), output_data.span());
    } else {
        // BUGFIX: was `axis <= rank`, which accepted axis == rank and then
        // indexed out of bounds inside accumulate_production. Valid axes are
        // 0..rank-1, so the bound must be strict.
        assert(axis < (*self.shape).len(), 'axis out of dimensions');
        // One product per position of the reduced output shape.
        let output_shape = reduce_output_shape(*self.shape, axis, false);
        let output_data_len = len_from_shape(output_shape);
        let mut index: usize = 0;
        while index != output_data_len {
            let output_indices = unravel_index(index, output_shape);
            let current_prod = accumulate_production::<
                T
            >(*self.data, *self.shape, output_indices, axis);
            output_data.append(current_prod);
            index += 1;
        };
        if keepdims {
            // Re-insert the reduced axis with extent 1.
            let output_shape = reduce_output_shape(*self.shape, axis, true);
            TensorTrait::<T>::new(output_shape, output_data.span())
        } else {
            TensorTrait::<T>::new(output_shape, output_data.span())
        }
    }
}
/// Helper function that accumulates the product of elements along a specific axis.
///
/// # Arguments
/// * `input_data` - The input's data.
/// * `input_shape` - The input's shape.
/// * `output_indices` - A span of output indices (the coordinates held fixed).
/// * `axis` - The axis along which to accumulate the product.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A value of type `T` representing the accumulated product along the specified axis.
fn accumulate_production<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut prod: T = NumberTrait::one();

    if input_shape.len() > 1 {
        // Multi-dimensional input: walk the `axis` dimension while the
        // remaining coordinates stay fixed at `output_indices`.
        let mut idx: usize = 0;
        while idx != axis_len {
            let input_indices = combine_indices(output_indices, idx, axis);
            let input_index = ravel_index(input_shape, input_indices);
            prod *= *(input_data)[input_index];
            idx += 1;
        };
    } else {
        // One-dimensional input: multiply every element.
        loop {
            match input_data.pop_front() {
                Option::Some(value) => { prod *= *value; },
                Option::None => { break; }
            };
        };
    }

    prod
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_sum.cairo | use core::option::OptionTrait;
use core::traits::TryInto;
use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_sum docstring
///
/// Sums the elements of `self` over `axes` (all axes when `axes` is None or
/// empty). Negative axis values index from the end. When `keepdims` is true
/// (the default) reduced axes are kept with size 1. When
/// `noop_with_empty_axes` is true and `axes` is None, the input is returned
/// unchanged.
fn reduce_sum<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<i32>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // Default: an absent `noop_with_empty_axes` behaves as false.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    // Normalize `axes`: empty span means "all axes"; negative entries are
    // shifted by the rank; duplicates are rejected.
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr: Array<usize> = array![];
                let mut copy_axes = axes.clone();
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => {
                            // Adjust negative axes to positive
                            let adjusted_axis = if *axis < 0 {
                                ((*self.shape).len().try_into().unwrap() + *axis)
                                    .try_into()
                                    .unwrap()
                            } else {
                                (*axis).try_into().unwrap()
                            };
                            axes_arr.append(adjusted_axis);
                        },
                        Option::None => { break; }
                    };
                };
                // Sort ascending so later axis indices can be shifted by the
                // number of axes already collapsed (see `axis_c` below).
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            // No axes given: either a no-op (if requested) or reduce all.
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    // Default: reduced dimensions are kept unless told otherwise.
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };

    // Reduce one axis at a time. `axis_c` counts the axes already removed,
    // so each remaining axis index is shifted left by that amount.
    let mut axis_c = 0;
    let mut copy_axes = axes.clone();
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Only one dimension left: sum everything to a scalar.
                    let current_sum = accumulate_sum::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_sum].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_sum = accumulate_sum::<T>(data, shape, indices, *axis - axis_c);

                    temp_data.append(current_sum);

                    index += 1;
                };

                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };

    let mut axes_copy = axes.clone();
    if keepdims {
        // Rebuild the full-rank shape with every reduced axis set to 1.
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Helper function that accumulates the sum of elements along a specific axis.
///
/// # Arguments
/// * `input_data` - The input's data.
/// * `input_shape` - The input's shape.
/// * `output_indices` - A span of output indices (the coordinates held fixed).
/// * `axis` - The axis along which to accumulate the sum.
///
/// # Panics
/// * Panics if gas limit is exceeded during execution.
///
/// # Returns
/// * A value representing the accumulated sum along the specified axis.
fn accumulate_sum<
    T, MAG, impl TNumber: NumberTrait<T, MAG>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut total: T = NumberTrait::zero();

    if input_shape.len() > 1 {
        // Multi-dimensional input: sum over the `axis` dimension while the
        // other coordinates stay fixed at `output_indices`.
        let mut idx = 0;
        loop {
            if idx == axis_len {
                break;
            }
            let coords = combine_indices(output_indices, idx, axis);
            let flat = ravel_index(input_shape, coords);
            total = NumberTrait::add(total, *(input_data)[flat]);
            idx += 1;
        };
    } else {
        // One-dimensional input: sum every element.
        loop {
            match input_data.pop_front() {
                Option::Some(value) => total = NumberTrait::add(total, *value),
                Option::None => { break; }
            };
        };
    }

    total
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/reduce_sum_square.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::numbers::fixed_point::core::FixedTrait;
/// Squares every element of `self`, preserving the shape.
fn square<
    T,
    MAG,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>
) -> Tensor<T> {
    let mut input = *self.data;
    let mut squared = array![];

    // Element-wise x -> x * x.
    loop {
        match input.pop_front() {
            Option::Some(value) => {
                let v = *value;
                squared.append(v * v);
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(*self.shape, squared.span())
}
/// Cf: TensorTrait::reduce_sum_square docstring
///
/// Squares each element of `self`, then sums the squares along `axis`.
fn reduce_sum_square<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // Square element-wise, then delegate the reduction to `reduce_sum`.
    square(self)
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        )
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/resize.cairo | use alexandria_sorting::bubble_sort;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
/// Interpolation algorithm used by `resize`
/// (cf. the ONNX Resize `mode` attribute).
#[derive(Copy, Drop)]
enum MODE {
    NEAREST,
    LINEAR,
    CUBIC,
}
/// Rounding rule applied when `MODE::NEAREST` is selected
/// (cf. the ONNX Resize `nearest_mode` attribute).
#[derive(Copy, Drop)]
enum NEAREST_MODE {
    ROUND_PREFER_FLOOR,
    ROUND_PREFER_CEIL,
    FLOOR,
    CEIL
}
/// How requested output sizes interact with the input's aspect ratio
/// (cf. the ONNX Resize `keep_aspect_ratio_policy` attribute).
#[derive(Copy, Drop)]
enum KEEP_ASPECT_RATIO_POLICY {
    STRETCH,
    NOT_LARGER,
    NOT_SMALLER
}
/// Mapping from output-space coordinates back to input-space coordinates
/// (cf. the ONNX Resize `coordinate_transformation_mode` attribute).
#[derive(Copy, Drop)]
enum TRANSFORMATION_MODE {
    HALF_PIXEL,
    ALIGN_CORNERS,
    ASYMMETRIC,
    TF_CROP_AND_RESIZE,
    PYTORCH_HALF_PIXEL,
    HALF_PIXEL_SYMMETRIC
}
/// Cf: TensorTrait::resize docstring
///
/// Thin wrapper over `interpolate_nd`, which implements the actual
/// ONNX-Resize-style interpolation; every attribute is forwarded as-is.
fn resize<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    self: @Tensor<T>,
    roi: Option<Tensor<T>>,
    scales: Option<Span<T>>,
    sizes: Option<Span<usize>>,
    antialias: Option<usize>,
    axes: Option<Span<usize>>,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    cubic_coeff_a: Option<T>,
    exclude_outside: Option<bool>,
    extrapolation_value: Option<T>,
    keep_aspect_ratio_policy: Option<KEEP_ASPECT_RATIO_POLICY>,
    mode: Option<MODE>,
    nearest_mode: Option<NEAREST_MODE>,
) -> Tensor<T> {
    interpolate_nd(
        self,
        antialias,
        mode,
        nearest_mode,
        scales,
        sizes,
        roi,
        keep_aspect_ratio_policy,
        exclude_outside,
        coordinate_transformation_mode,
        extrapolation_value,
        axes,
        cubic_coeff_a
    )
}
/// N-dimensional interpolation driver behind `resize`.
///
/// Exactly one of `scale_factors` / `output_size` must be provided (both
/// absent panics). When `axes` is given, the per-axis scales, sizes and roi
/// are expanded to the full rank `r` (missing axes keep their size, get
/// scale 1, and a full roi). The keep-aspect-ratio policy is then applied,
/// and the output is built by evaluating `interpolate_nd_with_x` at every
/// output coordinate enumerated by `cartesian`.
fn interpolate_nd<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    data: @Tensor<T>,
    antialias: Option<usize>,
    mode: Option<MODE>,
    nearest_mode: Option<NEAREST_MODE>,
    scale_factors: Option<Span<T>>,
    output_size: Option<Span<usize>>,
    roi: Option<Tensor<T>>,
    keep_aspect_ratio_policy: Option<KEEP_ASPECT_RATIO_POLICY>,
    exclude_outside: Option<bool>,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    extrapolation_value: Option<T>,
    axes: Option<Span<usize>>,
    cubic_coeff_a: Option<T>,
) -> Tensor<T> {
    // Attribute defaults.
    let mode = match mode {
        Option::Some(mode) => mode,
        Option::None => { MODE::NEAREST },
    };
    let keep_aspect_ratio_policy = match keep_aspect_ratio_policy {
        Option::Some(keep_aspect_ratio_policy) => keep_aspect_ratio_policy,
        Option::None => { KEEP_ASPECT_RATIO_POLICY::STRETCH },
    };
    let exclude_outside = match exclude_outside {
        Option::Some(exclude_outside) => exclude_outside,
        Option::None => { false },
    };
    let extrapolation_value = match extrapolation_value {
        Option::Some(extrapolation_value) => extrapolation_value,
        Option::None => { NumberTrait::zero() },
    };

    if output_size.is_none() && scale_factors.is_none() {
        core::panic_with_felt252('size and scale are None');
    }

    let r = (*data).shape.len();

    // When `axes` is given, scales/sizes/roi only cover those axes:
    // expand them to rank `r`.
    let (axes, scale_factors, output_size, roi) = match axes {
        Option::Some(axes) => {
            let mut scale_factors = match scale_factors {
                Option::Some(scale_factors) => {
                    // Per-dimension scale: listed value on named axes, 1 elsewhere.
                    let mut new_scale_factors = ArrayTrait::<T>::new();
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break NumberTrait::one();
                            }
                            if *axes.at(i) == d {
                                break *scale_factors.at(i);
                            }
                            i += 1;
                        };
                        new_scale_factors.append(item);
                        d += 1;
                    };

                    Option::Some(new_scale_factors.span())
                },
                Option::None => { Option::None },
            };
            let mut output_size = match output_size {
                Option::Some(output_size) => {
                    // Per-dimension size: listed value on named axes,
                    // the original extent elsewhere.
                    let mut new_output_size = array![];
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break *(*data).shape.at(d);
                            }
                            if *axes.at(i) == d {
                                break *output_size.at(i);
                            }
                            i += 1;
                        };
                        new_output_size.append(item);
                        d += 1;
                    };

                    Option::Some(new_output_size.span())
                },
                Option::None => { Option::None },
            };
            let mut roi = match roi {
                Option::Some(roi) => {
                    // roi is stored as r starts followed by r ends;
                    // unnamed axes get (0, 1).
                    let mut new_roi_data = array![];
                    let naxes = axes.len();
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break NumberTrait::zero();
                            }
                            if *axes.at(i) == d {
                                break *roi.data.at(i);
                            }
                            i += 1;
                        };
                        new_roi_data.append(item);
                        d += 1;
                    };

                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break NumberTrait::one();
                            }
                            if *axes.at(i) == d {
                                break *roi.data.at(i + naxes);
                            }
                            i += 1;
                        };
                        new_roi_data.append(item);
                        d += 1;
                    };

                    let mut shape = ArrayTrait::new();
                    shape.append(r * 2);

                    Option::Some(TensorTrait::new(shape.span(), new_roi_data.span()))
                },
                Option::None => { Option::None },
            };

            (axes, scale_factors, output_size, roi)
        },
        Option::None => {
            // No `axes` attribute: all dimensions are resized.
            let mut axes = array![];
            let mut i = 0;
            while i != r {
                axes.append(i);
                i += 1;
            };

            (axes.span(), scale_factors, output_size, roi)
        }
    };

    // Derive the missing one of (output_size, scale_factors) from the other,
    // applying the keep-aspect-ratio policy when sizes were given.
    let (mut output_size, mut scale_factors) = match output_size {
        Option::Some(output_size) => {
            let mut scale_factors: Array<T> = array![];
            let mut i = 0;
            while i != r {
                let output_size_i: T = NumberTrait::new_unscaled(
                    (*output_size.at(i)).into(), false
                );
                let data_shape_i: T = NumberTrait::new_unscaled(
                    (*(*data).shape.at(i)).into(), false
                );
                scale_factors.append(output_size_i / data_shape_i);
                i += 1;
            };

            let (mut output_size, mut scale_factors) = match keep_aspect_ratio_policy {
                KEEP_ASPECT_RATIO_POLICY::STRETCH => { (output_size, scale_factors.span()) },
                KEEP_ASPECT_RATIO_POLICY::NOT_LARGER => {
                    // Use the smallest scale over the resized axes everywhere.
                    let mut scale = *scale_factors.at(*axes.at(0));
                    let mut i = 1;
                    while i != axes.len() {
                        if scale > *scale_factors.at(*axes.at(i)) {
                            scale = *scale_factors.at(*axes.at(i));
                        }
                        i += 1;
                    };

                    let mut scale_factors: Array<T> = array![];
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break NumberTrait::one();
                            }
                            if *axes.at(i) == d {
                                break scale;
                            }
                            i += 1;
                        };
                        scale_factors.append(item);
                        d += 1;
                    };

                    let mut output_size = array![];
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break *(*data).shape.at(d);
                            }
                            if *axes.at(i) == d {
                                break NumberTrait::round(
                                    scale
                                        * NumberTrait::new_unscaled(
                                            (*(*data).shape.at(d)).into(), false
                                        )
                                )
                                    .try_into()
                                    .unwrap();
                            }
                            i += 1;
                        };
                        output_size.append(item);
                        d += 1;
                    };

                    (output_size.span(), scale_factors.span())
                },
                KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER => {
                    // Use the largest scale over the resized axes everywhere.
                    let mut scale = *scale_factors.at(*axes.at(0));
                    let mut i = 1;
                    while i != axes.len() {
                        if scale < *scale_factors.at(*axes.at(i)) {
                            scale = *scale_factors.at(*axes.at(i));
                        }
                        i += 1;
                    };

                    let mut scale_factors: Array<T> = array![];
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break NumberTrait::one();
                            }
                            if *axes.at(i) == d {
                                break scale;
                            }
                            i += 1;
                        };
                        scale_factors.append(item);
                        d += 1;
                    };

                    let mut output_size = array![];
                    let mut d = 0;
                    while d != r {
                        let mut i = 0;
                        let item = loop {
                            if i == axes.len() {
                                break *(*data).shape.at(d);
                            }
                            if *axes.at(i) == d {
                                break NumberTrait::round(
                                    scale
                                        * NumberTrait::new_unscaled(
                                            (*(*data).shape.at(d)).into(), false
                                        )
                                )
                                    .try_into()
                                    .unwrap();
                            }
                            i += 1;
                        };
                        output_size.append(item);
                        d += 1;
                    };

                    (output_size.span(), scale_factors.span())
                },
            };

            (output_size, scale_factors)
        },
        Option::None => {
            // Only scales given: output size = floor(scale * input size).
            let mut output_size: Array<usize> = array![];
            let scale_factors = match scale_factors {
                Option::Some(scale_factors) => scale_factors,
                Option::None => { core::panic_with_felt252('size and scale None') },
            };

            let mut i = 0;
            while i != scale_factors.len() {
                let item = *scale_factors.at(i)
                    * NumberTrait::new_unscaled((*(*(data).shape).at(i)).into(), false);
                output_size.append(item.try_into().unwrap());
                i += 1;
            };

            (output_size.span(), scale_factors)
        },
    };

    // Enumerate all output coordinates: one index range per dimension...
    let mut ret: Array<Span<usize>> = array![];
    let mut i = 0;
    while i != output_size.len() {
        let mut temp = ArrayTrait::<usize>::new();
        let mut j = 0;
        while j != *output_size.at(i) {
            temp.append(j);
            j += 1;
        };
        ret.append(temp.span());
        i += 1;
    };

    // ...then their cartesian product, evaluated point by point.
    let mut ret = cartesian(ret.span());
    let mut ret_data = array![];

    loop {
        match ret.pop_front() {
            Option::Some(X) => {
                let mut x: Array<T> = array![];
                let mut i = 0;
                while i != X.len() {
                    x.append(NumberTrait::new_unscaled((*X.at(i)).into(), false));
                    i += 1;
                };

                let mut x = x.span();
                let item = interpolate_nd_with_x(
                    data,
                    (*data).shape.len(),
                    scale_factors,
                    output_size,
                    x,
                    antialias,
                    mode,
                    nearest_mode,
                    roi,
                    extrapolation_value,
                    coordinate_transformation_mode,
                    exclude_outside,
                    cubic_coeff_a
                );
                ret_data.append(*item.data.at(0));
            },
            Option::None => { break; }
        }
    };

    // (An unused local `shape` array previously built here was removed.)
    TensorTrait::new(output_size, ret_data.span())
}
/// Cartesian product of the given index arrays.
///
/// Returns one `Array<usize>` per combination (`n = prod(len_i)` in total),
/// ordered so the first input varies slowest and the last varies fastest —
/// the same order as nested loops.
fn cartesian(mut arrays: Span<Span<usize>>,) -> Array<Array<usize>> {
    // n = total number of combinations.
    let mut n = 1;
    let mut i = arrays.len() - 1;
    loop {
        n = n * (*(arrays.at(i))).len();
        if i == 0 {
            break;
        }
        i -= 1;
    };

    let mut i = 0;
    let mut size_arrays = array![];
    while i != arrays.len() {
        size_arrays.append((*(arrays.at(i))).len());
        i += 1;
    };

    let size_arrays = size_arrays.span();
    let mut output_arrays = array![];

    // For each input, build its full column of n entries: repeat each
    // element m times (m = #combinations of the later inputs), then tile
    // the result across the earlier inputs with `repeat_2`.
    let mut m = n;
    let mut i = 0;
    while i != arrays.len() {
        m = m / (*(arrays.at(i))).len();
        let mut out = repeat(*(arrays.at(i)), m);
        out = repeat_2(out, size_arrays, i);

        output_arrays.append(out);
        i += 1;
    };

    let output_arrays = output_arrays.span();

    // Transpose the columns into one row (coordinate tuple) per combination.
    let mut i = 0;
    let mut ret = array![];
    while i != n {
        let mut j = 0;
        let mut x = array![];
        while j != arrays.len() {
            x.append(*(output_arrays.at(j)).at(i));

            j += 1;
        };

        ret.append(x);
        i += 1;
    };

    ret
}
/// Tiles `array` so that its content cycles across every dimension before
/// `index` (used by `cartesian` to grow a column to the full product length).
///
/// For each earlier dimension the current content is appended
/// `size_array[index - 1 - i] - 1` extra times. Reading `array.at(k)` while
/// appending is sound because Cairo arrays are append-only: existing
/// elements never move or change.
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> {
    // `size` tracks the length of the block being duplicated.
    let mut size = array.len();
    let mut i = 0;
    while i != index {
        let mut j = 1;
        while j != *size_array.at(index - 1 - i) {
            let mut k = 0;
            while k != size {
                array.append(*array.at(k));
                k += 1;
            };

            j += 1;
        };

        size = size * *size_array.at(index - 1 - i);
        i += 1;
    };

    array
}
/// Repeats each element of `array` `m` times, preserving order:
/// [a, b] with m = 2 becomes [a, a, b, b].
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> {
    let mut result = array![];
    let mut idx = 0;
    loop {
        if idx == array.len() {
            break;
        }
        let mut rep = 0;
        loop {
            if rep == m {
                break;
            }
            result.append(*array.at(idx));
            rep += 1;
        };
        idx += 1;
    };

    result
}
/// Recursive core of `interpolate_nd`: evaluates the interpolated value at
/// one (fractional) coordinate `x` over the trailing `n` dimensions of `data`.
///
/// Base case `n == 1` delegates to `interpolate_1d_with_x`. Otherwise the
/// first axis is peeled off: each first-axis row is interpolated recursively
/// over the remaining n-1 axes, and the per-row results are then
/// interpolated along the first axis. A given `roi` is split accordingly:
/// the first-axis (start, end) pair feeds the final 1-D step, the rest feed
/// the recursion. Returns a 1-element tensor.
fn interpolate_nd_with_x<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    data: @Tensor<T>,
    n: usize,
    mut scale_factor: Span<T>,
    mut output_size: Span<usize>,
    mut x: Span<T>,
    antialias: Option<usize>,
    mode: MODE,
    nearest_mode: Option<NEAREST_MODE>,
    roi: Option<Tensor<T>>,
    extrapolation_value: T,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    exclude_outside: bool,
    cubic_coeff_a: Option<T>,
) -> Tensor<T> {
    if n == 1 {
        return interpolate_1d_with_x(
            data,
            *scale_factor.at(0),
            *output_size.at(0),
            *x.at(0),
            antialias,
            mode,
            nearest_mode,
            roi,
            extrapolation_value,
            coordinate_transformation_mode,
            exclude_outside,
            cubic_coeff_a
        );
    }

    let mut res1d = array![];

    // Split off the first-axis scale/size/coordinate; the remaining spans
    // drive the (n-1)-dimensional recursion.
    let scale_factor_zero = match scale_factor.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('scale factor empty')
    };
    let output_size_zero = match output_size.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('output_size empty')
    };
    let x_zero = match x.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('x empty')
    };

    // roi for the recursion: drop the first-axis start (index 0) and
    // end (index n) entries, keeping the bounds of the remaining axes.
    let reduced_roi = match roi {
        Option::Some(roi) => {
            let mut reduced_roi = ArrayTrait::new();
            let mut reduced_roi_shape = ArrayTrait::new();
            reduced_roi_shape.append(roi.data.len() - 2);

            let mut i = 1;
            while i != 2 * n {
                if i != n {
                    reduced_roi.append(*roi.data.at(i));
                }

                i += 1;
            };

            Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span()))
        },
        Option::None => { Option::None }
    };

    // Interpolate each first-axis row over the remaining axes and
    // concatenate the scalar results.
    let mut i = 0;
    while i != *(*data).shape.at(0) {
        let data = get_row_n(data, i);

        let mut r = interpolate_nd_with_x(
            @data,
            n - 1,
            scale_factor,
            output_size,
            x,
            antialias,
            mode,
            nearest_mode,
            reduced_roi,
            extrapolation_value,
            coordinate_transformation_mode,
            exclude_outside,
            cubic_coeff_a
        );

        loop {
            match r.data.pop_front() {
                Option::Some(item) => { res1d.append(*item); },
                Option::None => { break; }
            }
        };

        i += 1;
    };

    let mut shape = array![];
    shape.append(res1d.len());
    let res1d = TensorTrait::new(shape.span(), res1d.span());

    // roi for the final 1-D step: the first-axis (start, end) pair.
    let reduced_roi = match roi {
        Option::Some(roi) => {
            let mut reduced_roi = array![];
            let mut reduced_roi_shape = array![];
            reduced_roi_shape.append(2);

            reduced_roi.append(*roi.data.at(0));
            reduced_roi.append(*roi.data.at(n));

            Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span()))
        },
        Option::None => { Option::None }
    };

    // Finally interpolate the per-row results along the first axis.
    let a = interpolate_1d_with_x(
        @res1d,
        scale_factor_zero,
        output_size_zero,
        x_zero,
        antialias,
        mode,
        nearest_mode,
        reduced_roi,
        extrapolation_value,
        coordinate_transformation_mode,
        exclude_outside,
        cubic_coeff_a
    );

    a
}
/// Extracts slice `index` along the first axis of `data`; the result has
/// shape `data.shape[1:]`.
fn get_row_n<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>(
    data: @Tensor<T>, index: usize,
) -> Tensor<T> {
    // The row length is the product of all trailing dimensions, which also
    // form the output shape.
    let mut row_shape = array![];
    let mut row_len = 1;
    let mut dim = 0;
    while dim != (*data).shape.len() {
        if dim != 0 {
            let extent = *(*data).shape.at(dim);
            row_shape.append(extent);
            row_len = row_len * extent;
        }
        dim += 1;
    };

    // Copy the contiguous slice starting at index * row_len.
    let mut row_data = array![];
    let mut offset = 0;
    while offset != row_len {
        row_data.append(*(*data).data.at(index * row_len + offset));
        offset += 1;
    };

    TensorTrait::new(row_shape.span(), row_data.span())
}
/// Interpolates a 1-D tensor `data` at the output-space coordinate `x`.
///
/// `x` is first mapped back to input space according to
/// `coordinate_transformation_mode`, then filter weights for the selected
/// `mode` are computed from the fractional part and applied to the
/// neighboring samples via a matmul. Returns a 1-element tensor.
fn interpolate_1d_with_x<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    data: @Tensor<T>,
    scale_factor: T,
    output_width_int: usize,
    x: T,
    antialias: Option<usize>,
    mode: MODE,
    nearest_mode: Option<NEAREST_MODE>,
    roi: Option<Tensor<T>>,
    extrapolation_value: T,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    exclude_outside: bool,
    cubic_coeff_a: Option<T>,
) -> Tensor<T> {
    let coordinate_transformation_mode = match coordinate_transformation_mode {
        Option::Some(coordinate_transformation_mode) => coordinate_transformation_mode,
        Option::None => { TRANSFORMATION_MODE::HALF_PIXEL },
    };

    let input_width = (*data).data.len();
    let output_width = (scale_factor * NumberTrait::new_unscaled((input_width).into(), false));

    // Map the output-space coordinate `x` back to input space.
    let x_ori: T = match coordinate_transformation_mode {
        TRANSFORMATION_MODE::HALF_PIXEL => {
            (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
        },
        TRANSFORMATION_MODE::ALIGN_CORNERS => {
            let mut x_ori = NumberTrait::zero();
            if output_width != NumberTrait::one() {
                x_ori = x
                    * (NumberTrait::new_unscaled(input_width.into(), false) - NumberTrait::one())
                    / (output_width - NumberTrait::one());
            }

            x_ori
        },
        TRANSFORMATION_MODE::ASYMMETRIC => { x / scale_factor },
        TRANSFORMATION_MODE::TF_CROP_AND_RESIZE => {
            // Requires `roi`; coordinates mapped outside the input range
            // return `extrapolation_value` immediately.
            let x_ori = match roi {
                Option::Some(roi) => {
                    let mut x_ori = if output_width == NumberTrait::one() {
                        (*roi.data.at(1) - *roi.data.at(0))
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one())
                            / (NumberTrait::one() + NumberTrait::one())
                    } else {
                        x
                            * (*roi.data.at(1) - *roi.data.at(0))
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one())
                            / (output_width - NumberTrait::one())
                    };

                    x_ori = x_ori
                        + *roi.data.at(0)
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one());

                    if x_ori < NumberTrait::zero()
                        || x_ori > (NumberTrait::new_unscaled(input_width.into(), false)
                            - NumberTrait::one()) {
                        let mut ret = ArrayTrait::new();
                        let mut shape = ArrayTrait::new();
                        shape.append(1);
                        ret.append(extrapolation_value);
                        return TensorTrait::new(shape.span(), ret.span());
                    };

                    x_ori
                },
                Option::None => { core::panic_with_felt252('roi cannot be None.') },
            };

            x_ori
        },
        TRANSFORMATION_MODE::PYTORCH_HALF_PIXEL => {
            if output_width == NumberTrait::one() {
                NumberTrait::neg(NumberTrait::<T>::half())
            } else {
                (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
            }
        },
        TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC => {
            let adjustement: T = NumberTrait::new_unscaled(output_width_int.into(), false)
                / output_width;
            let center: T = NumberTrait::new_unscaled(input_width.into(), false)
                / (NumberTrait::one() + NumberTrait::one());
            let offset = center * (NumberTrait::one() - adjustement);

            offset + (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
        },
    };

    // Split into integer part and fractional ratio; an exactly-integral
    // coordinate yields ratio 1.
    let x_ori_int = x_ori.floor();

    let ratio = if x_ori_int.try_into().unwrap() == x_ori {
        NumberTrait::one()
    } else {
        x_ori - x_ori_int.try_into().unwrap()
    };

    // Filter weights for the selected interpolation mode.
    let mut coeffs = match mode {
        MODE::NEAREST => {
            let coeffs = match antialias {
                Option::Some => core::panic_with_felt252('antialias not for mode NEAREST'),
                Option::None => { nearest_coeffs(ratio, nearest_mode) },
            };

            coeffs
        },
        MODE::LINEAR => {
            let coeffs = match antialias {
                Option::Some(antialias) => {
                    let coeffs = if antialias == 0 {
                        linear_coeffs(ratio)
                    } else {
                        linear_coeffs_antialias(ratio, scale_factor)
                    };

                    coeffs
                },
                Option::None => { linear_coeffs(ratio) },
            };

            coeffs
        },
        MODE::CUBIC => {
            let coeffs = match antialias {
                Option::Some => { cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) },
                Option::None => { cubic_coeffs(ratio, cubic_coeff_a) },
            };

            coeffs
        },
    };

    let n = coeffs.data.len();
    let (idxes, points) = get_neighbor(x_ori, n, data);

    // With exclude_outside, zero the weights of padded (out-of-range)
    // neighbors and renormalize the survivors to sum to 1.
    if exclude_outside {
        let mut coeffs_exclude_outside: Array<T> = array![];
        let mut sum = NumberTrait::zero();
        let mut i = 0;
        while i != idxes.data.len() {
            if *idxes.data.at(i) {
                coeffs_exclude_outside.append(NumberTrait::zero());
                sum += NumberTrait::zero();
            } else {
                coeffs_exclude_outside.append(*coeffs.data.at(i));
                sum += *coeffs.data.at(i);
            }

            i += 1;
        };

        let mut coeff_div: Array<T> = array![];
        let mut i = 0;
        while i != n {
            coeff_div.append(*coeffs_exclude_outside.at(i) / sum);
            i += 1;
        };

        coeffs = TensorTrait::new(coeffs.shape, coeff_div.span());
    }

    // Weighted sum of the neighboring samples.
    TensorTrait::matmul(@coeffs, @points)
}
/// Gathers the `n` samples of `data` closest to coordinate `x`.
///
/// `data` is edge-padded on both sides with `pad_width = ceil(n / 2)` copies
/// of its first/last element so every requested neighbor exists. Returns a
/// pair of `n`-element tensors:
/// * a `bool` tensor that is `true` where the neighbor fell in the padding
///   (i.e. outside the original data range),
/// * the neighbor values themselves.
fn get_neighbor<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, n: usize, data: @Tensor<T>,
) -> (Tensor<bool>, Tensor<T>) {
    // Number of replicated samples added on each side.
    let pad_width: usize = NumberTrait::ceil(
        NumberTrait::new_unscaled(n.into(), false)
            / (NumberTrait::<T>::one() + NumberTrait::<T>::one())
    )
        .try_into()
        .unwrap();

    // Build the edge-padded copy: left pad, original data, right pad.
    let mut padded = array![];

    let mut i = 0;
    while i != pad_width {
        padded.append(*(*data).data.at(0));
        i += 1;
    };

    let mut i = 0;
    while i != (*data).data.len() {
        padded.append(*(*data).data.at(i));
        i += 1;
    };

    let mut i = 0;
    while i != pad_width {
        padded.append(*(*data).data.at((*data).data.len() - 1));
        i += 1;
    };

    // Shift x into the padded index space.
    x = x + NumberTrait::new_unscaled(pad_width.into(), false);

    let mut idxes = get_neighbor_idxes(x, n, padded.len());

    let mut idxes_centered = array![];
    let mut ret = array![];
    let mut i = 0;
    while i != idxes.data.len() {
        ret.append(*padded.at(*idxes.data.at(i)));

        // true <=> the index points into the left or right padding.
        if *idxes.data.at(i) >= pad_width {
            if (*idxes.data.at(i) - pad_width) >= (*data).data.len() {
                idxes_centered.append(true);
            } else {
                idxes_centered.append(false);
            }
        } else {
            idxes_centered.append(true);
        }

        i += 1;
    };

    let mut shape = array![];
    shape.append(idxes.data.len());

    (
        TensorTrait::new(shape.span(), idxes_centered.span()),
        TensorTrait::new(shape.span(), ret.span())
    )
}
/// Returns the `n` indices (sorted ascending) of the sample points nearest
/// to coordinate `x`, clipped to `[0, limit)`. Only even `n` is supported;
/// odd `n` panics with 'MUST BE EVEN'.
fn get_neighbor_idxes<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, n: usize, limit: usize,
) -> Tensor<usize> {
    let mut idxes = array![];

    if n % 2 == 0 {
        // Start from the pair (floor(x), ceil(x)), clipped into range.
        let (mut i_low, mut i_high) = if x < NumberTrait::zero() {
            (0, 1)
        } else {
            (NumberTrait::floor(x).try_into().unwrap(), NumberTrait::ceil(x).try_into().unwrap())
        };

        if i_high >= limit {
            i_low = limit - 2;
            i_high = limit - 1;
        }

        if i_low == i_high {
            // x is integral: widen the window to two distinct indices.
            if i_low == 0 {
                i_high = i_high + 1;
            } else {
                i_low = i_low - 1;
            }
        }

        // Expand outward, taking one index per side per step, spilling to
        // the opposite side when a boundary is reached.
        let mut i = 0;
        while i != n / 2 {
            // FIX: indices are usize, so the original `i_low - i < 0` test
            // could never be true and instead panicked on u32 subtraction
            // underflow; the intended condition is `i_low < i`.
            if i_low < i {
                idxes.append(i_high + i);
                i_high += 1;
            } else {
                idxes.append(i_low - i);
            }

            if i_high + i >= limit {
                i_low -= 1;
                idxes.append(i_low - i);
            } else {
                idxes.append(i_high + i);
            }

            i += 1;
        }
    } else {
        core::panic_with_felt252('MUST BE EVEN');
    }

    idxes = bubble_sort::bubble_sort_elements(idxes, true);

    let mut shape = array![];
    shape.append(n);

    TensorTrait::new(shape.span(), idxes.span())
}
/// Linear interpolation weights for the two nearest neighbors:
/// (1 - ratio, ratio), returned as a 2-element tensor.
fn linear_coeffs<
    T,
    MAG,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +TensorTrait<T>,
    +Copy<T>,
    +Drop<T>,
    +Sub<T>
>(
    mut ratio: T
) -> Tensor<T> {
    let mut coeffs = array![];
    coeffs.append(NumberTrait::one() - ratio);
    coeffs.append(ratio);

    let mut shape = array![];
    shape.append(2);

    TensorTrait::new(shape.span(), coeffs.span())
}
/// Triangle-filter (linear) weights with antialiasing for downscaling.
///
/// With `scale` clamped to at most 1, evaluates `1 - |(i - ratio) * scale|`
/// over the widened filter footprint, clips each weight to [0, 1], and
/// normalizes the weights so they sum to 1.
fn linear_coeffs_antialias<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, scale: T
) -> Tensor<T> {
    let scale = NumberTrait::min(scale, NumberTrait::one());
    // Footprint covers [start, start + footprint) sample offsets.
    let start = (NumberTrait::floor(NumberTrait::neg(NumberTrait::one()) / scale)
        + NumberTrait::one());
    let footprint = (NumberTrait::one() + NumberTrait::one())
        - (NumberTrait::one() + NumberTrait::one()) * start;

    let mut coeffs: Array<T> = array![];
    let mut sum = NumberTrait::zero();

    // arange and clip + compute sum
    let mut i = start;
    while i != start + footprint {
        let value = NumberTrait::one() - NumberTrait::abs((i - ratio) * scale);

        if value < NumberTrait::zero() {
            coeffs.append(NumberTrait::zero());
        } else if value > NumberTrait::one() {
            coeffs.append(NumberTrait::one());
            sum += NumberTrait::one();
        } else {
            coeffs.append(value);
            sum += value;
        }

        i += NumberTrait::one();
    };

    // Normalize so the retained weights sum to 1.
    let n = coeffs.len();
    let mut coeff_div: Array<T> = array![];
    let mut i = 0;
    while i != n {
        coeff_div.append(*coeffs.at(i) / sum);
        i += 1;
    };

    let mut shape = array![];
    shape.append(n);

    TensorTrait::new(shape.span(), coeff_div.span())
}
/// Cubic convolution weights for the 4 neighbors around the sample point,
/// evaluated at offsets (1 + ratio, ratio, 1 - ratio, 2 - ratio).
///
/// `A` is the cubic coefficient and defaults to -3/4.
/// NOTE: `eigth` is a pre-existing misspelling of "eight" (value 8).
fn cubic_coeffs<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, A: Option<T>
) -> Tensor<T> {
    // Small integer constants built from `one` (no integer literals for T).
    let one = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();
    let five = four + NumberTrait::one();
    let eigth = four + four;

    // Default cubic coefficient: -3/4.
    let A = match A {
        Option::Some(A) => A,
        Option::None => { NumberTrait::neg(three / four) },
    };

    let mut coeffs = array![];
    let mut shape = array![];

    // Outer taps use the |x| in (1, 2) branch of the kernel; inner taps the
    // |x| <= 1 branch.
    coeffs
        .append(
            ((A * (ratio + one) - five * A) * (ratio + one) + eigth * A) * (ratio + one) - four * A
        );
    coeffs.append(((A + two) * ratio - (A + three)) * ratio * ratio + one);
    coeffs.append(((A + two) * (one - ratio) - (A + three)) * (one - ratio) * (one - ratio) + one);
    coeffs
        .append(
            ((A * ((one - ratio) + one) - five * A) * ((one - ratio) + one) + eigth * A)
                * ((one - ratio) + one)
                - four * A
        );
    shape.append(4);

    TensorTrait::new(shape.span(), coeffs.span())
}
/// Cubic convolution weights with antialiasing for downscaling.
///
/// With `scale` clamped to at most 1, evaluates the cubic kernel
/// `compute_coeff(scale * (i - ratio), A)` over the widened footprint
/// [i_start, i_end) and normalizes the weights so they sum to 1.
/// `A` defaults to -3/4.
fn cubic_coeffs_antialias<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, scale: T, A: Option<T>
) -> Tensor<T> {
    let one = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();

    let scale = NumberTrait::min(scale, NumberTrait::one());
    // Footprint widens as scale shrinks: [i_start, i_end).
    let i_start = NumberTrait::floor(NumberTrait::neg(two) / scale) + NumberTrait::one();
    let i_end = two - i_start;
    assert(i_end > i_start, 'i_end must be greater');

    let A = match A {
        Option::Some(A) => A,
        Option::None => { NumberTrait::neg(three / four) },
    };

    let mut coeffs = array![];
    let mut sum = NumberTrait::zero();

    let mut i = i_start;
    while i != i_end {
        let value = compute_coeff(scale * (i - ratio), A);
        coeffs.append(value);
        sum += value;

        i += NumberTrait::one();
    };

    // Normalize so the weights sum to 1.
    let n = coeffs.len();
    let mut coeff_div: Array<T> = array![];
    let mut i = 0;
    while i != n {
        coeff_div.append(*coeffs.at(i) / sum);
        i += 1;
    };

    let mut shape = array![];
    shape.append(n);

    TensorTrait::new(shape.span(), coeff_div.span())
}
/// Evaluates the piecewise cubic convolution kernel at `x` with coefficient
/// `A`: the |x| <= 1 branch, the 1 < |x| < 2 branch, and 0 elsewhere.
fn compute_coeff<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, A: T
) -> T {
    // Small integer constants built from `one` (no integer literals for T).
    let one = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();
    let five = four + NumberTrait::one();
    let eight = four + four;

    // The kernel is symmetric, so work with |x|.
    x = x.abs();
    let x_sq = x * x;
    let x_cu = x * x_sq;

    if x <= one {
        (A + two) * x_cu - (A + three) * x_sq + one
    } else if x < two {
        A * x_cu - five * A * x_sq + eight * A * x - four * A
    } else {
        NumberTrait::zero()
    }
}
/// Selection weights for nearest-neighbor resizing.
///
/// Returns a 2-element tensor that is either [1, 0] (pick the floor sample)
/// or [0, 1] (pick the ceil sample), chosen from the fractional offset
/// `ratio` according to `nearest_mode` (ONNX semantics; defaults to
/// ROUND_PREFER_FLOOR).
fn nearest_coeffs<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, nearest_mode: Option<NEAREST_MODE>
) -> Tensor<T> {
    let nearest_mode = match nearest_mode {
        Option::Some(nearest_mode) => { nearest_mode },
        Option::None => { NEAREST_MODE::ROUND_PREFER_FLOOR },
    };
    let mut ret = array![];
    let mut shape = array![];
    shape.append(2);
    // TODO(review): check whether this condition is general enough
    // (a ratio of exactly 1 always selects the ceil sample, regardless of mode).
    if ratio == NumberTrait::one() {
        ret.append(NumberTrait::zero());
        ret.append(NumberTrait::one());
        return TensorTrait::new(shape.span(), ret.span());
    }
    match nearest_mode {
        // Ties (ratio == 0.5) resolve toward the floor sample.
        NEAREST_MODE::ROUND_PREFER_FLOOR => {
            if ratio <= NumberTrait::half() {
                ret.append(NumberTrait::one());
                ret.append(NumberTrait::zero());
                return TensorTrait::new(shape.span(), ret.span());
            } else {
                ret.append(NumberTrait::zero());
                ret.append(NumberTrait::one());
                return TensorTrait::new(shape.span(), ret.span());
            }
        },
        // Ties resolve toward the ceil sample (note strict '<').
        NEAREST_MODE::ROUND_PREFER_CEIL => {
            if ratio < NumberTrait::half() {
                ret.append(NumberTrait::one());
                ret.append(NumberTrait::zero());
                return TensorTrait::new(shape.span(), ret.span());
            } else {
                ret.append(NumberTrait::zero());
                ret.append(NumberTrait::one());
                return TensorTrait::new(shape.span(), ret.span());
            }
        },
        // Always pick the floor sample.
        NEAREST_MODE::FLOOR => {
            ret.append(NumberTrait::one());
            ret.append(NumberTrait::zero());
            return TensorTrait::new(shape.span(), ret.span());
        },
        // Always pick the ceil sample.
        NEAREST_MODE::CEIL => {
            ret.append(NumberTrait::zero());
            ret.append(NumberTrait::one());
            return TensorTrait::new(shape.span(), ret.span());
        },
    }
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/round.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Element-wise rounding of every tensor entry; the shape is preserved.
fn round<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl FTensor: TensorTrait<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut rounded: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        rounded.append((*self.data.at(i)).round());
        i += 1;
    };
    TensorTrait::new(self.shape, rounded.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/scatter.cairo | use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
use core::dict::Felt252DictTrait;
use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
/// Cf: TensorTrait::scatter docstring
///
/// Scatters `updates` into a copy of `self` at the row/column positions
/// named by `indices`, along `axis` (defaults to 0). `reduction` is a
/// felt-encoded short string ('none' by default): 'none' keeps the update
/// (last write wins), 'add'/'mul' accumulate into the original element,
/// 'max'/'min' compare against it.
///
/// NOTE(review): only axis 0, the last axis, and (for rank > 2) axis 1 via a
/// transpose trick are handled; intermediate axes appear unsupported — confirm
/// against the trait docstring.
fn scatter<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
>(
    self: @Tensor<T>,
    updates: Tensor<T>,
    indices: Tensor<usize>,
    axis: Option<usize>,
    reduction: Option<usize>
) -> Tensor<T> {
    let mut axis = match axis {
        Option::Some(val) => val,
        Option::None => 0
    };
    let reduction = match reduction {
        Option::Some(val) => val,
        Option::None => 'none'
    };
    let data_rank = (*self.shape).len();
    let indices_rank = (indices.shape).len();
    let updates_rank = (updates.shape).len();
    assert((data_rank == updates_rank) & (updates_rank == indices_rank), 'must be same rank');
    let data_shape = *self.shape;
    // Every index must address an existing position along the scatter axis.
    let ind_max = indices.data.max().unwrap();
    assert(ind_max < *data_shape.at(axis), 'index is out of bound');
    let data_shape = *self.shape;
    let mut indices_shape = indices.shape;
    let updates_shape = updates.shape;
    // NOTE(review): only the first two dimensions are compared here — confirm
    // higher dimensions are validated elsewhere.
    assert(
        (*indices_shape[0] == *updates_shape[0]) & (*indices_shape[1] == *updates_shape[1]),
        'shape must be same'
    );
    let mut output_data = array![];
    let mut data_indices = indices.data;
    let mut data_updates = updates.data;
    // Maps a flat output position -> (1 + position of the update that lands there).
    let mut indices_updates: Felt252Dict<usize> = Default::default();
    // For reductions: maps a flat output position -> every update position
    // that targets it (so colliding writes can be folded).
    let mut indices_updates_reduction: Felt252Dict<Nullable<Span<usize>>> = Default::default();
    let mut data_shape_copy = data_shape;
    let mut indices_shape_copy = indices_shape;
    // Drop the leading dimension; the remainder gives the per-row element counts.
    *data_shape_copy.pop_front().unwrap();
    *indices_shape_copy.pop_front().unwrap();
    let mut indices_loop: usize = 1;
    let mut data_loop: usize = 1;
    // For axis 0, the flat stride of one leading-dimension step is the product
    // of the remaining dimensions.
    if (axis == 0) {
        loop {
            match indices_shape_copy.pop_front() {
                Option::Some(val) => { indices_loop *= *val; },
                Option::None => { break; }
            };
        };
        loop {
            match data_shape_copy.pop_front() {
                Option::Some(val) => { data_loop *= *val; },
                Option::None => { break; }
            };
        };
    }
    // Rank > 2 with axis 1 is reduced to the last-axis case by transposing
    // axes 1 and 2 on indices/updates, then transposing the result back.
    let mut transpose = false;
    if ((data_rank > 2) & (axis == 1)) {
        let index = indices.transpose(axes: array![0, 2, 1].span());
        let update = updates.transpose(axes: array![0, 2, 1].span());
        data_indices = index.data;
        data_updates = update.data;
        indices_shape = index.shape;
        axis = 2;
        transpose = true;
    }
    if (axis == (data_rank - 1)) {
        data_loop = *data_shape_copy.pop_back().unwrap();
        indices_loop = *indices_shape_copy.pop_back().unwrap();
    }
    // First pass: translate each scatter index into a flat position in the
    // output and record which update(s) target it.
    let mut total_count: usize = 0;
    let mut shift = 0;
    loop {
        let mut result: usize = 0;
        match data_indices.pop_front() {
            Option::Some(val) => {
                let value = total_count + 1;
                if (axis == 0) {
                    let column = total_count % indices_loop;
                    result = (*val * data_loop) + (column);
                    // Re-align when the indices tensor is narrower than data
                    // along the last dimension.
                    if ((result % *data_shape.at(data_rank - 1)) != total_count % *indices_shape
                        .at(data_rank - 1)) {
                        result +=
                            (*data_shape.at(data_rank - 1) - *indices_shape.at(data_rank - 1));
                    }
                }
                if (axis == (data_rank - 1)) {
                    let mut row = total_count / indices_loop;
                    // Skip the data rows not covered by the (narrower) indices tensor.
                    if ((data_rank > 2) & (row % *data_shape.at(1) >= *indices_shape.at(1))) {
                        shift = (*data_shape.at(1) - *indices_shape.at(1));
                    }
                    result = *val + (data_loop * (row + shift));
                }
                if (reduction == 'none') {
                    indices_updates.insert(result.into(), value.into());
                } else {
                    // Append this update position to the list already stored
                    // for the target slot (dict spans are immutable, so copy).
                    let mut arr = array![];
                    let val = indices_updates_reduction.get(result.into());
                    let mut a = ArrayTrait::new();
                    let mut span = match match_nullable(val) {
                        FromNullableResult::Null(()) => a.span(),
                        FromNullableResult::NotNull(val) => val.unbox(),
                    };
                    loop {
                        match span.pop_front() {
                            Option::Some(val) => { arr.append(*val); },
                            Option::None => { break; }
                        };
                    };
                    arr.append(total_count);
                    indices_updates_reduction
                        .insert(result.into(), nullable_from_box(BoxTrait::new(arr.span())));
                }
                total_count += 1;
            },
            Option::None => { break; }
        };
    };
    // Second pass: walk the original data; emit either the original element or
    // the (reduced) update(s) recorded for that flat position.
    let mut data = *self.data;
    let mut i: usize = 0;
    loop {
        match data.pop_front() {
            Option::Some(val) => {
                if (reduction == 'none') {
                    // 0 means "untouched" (stored values are offset by +1).
                    let value = indices_updates.get(i.into());
                    if (value == 0) {
                        output_data.append(*val);
                    } else {
                        let data_value = data_updates[value - 1];
                        output_data.append(*data_value);
                    }
                } else {
                    let value = indices_updates_reduction.get(i.into());
                    let mut a = array![];
                    let mut span = match match_nullable(value) {
                        FromNullableResult::Null(()) => a.span(),
                        FromNullableResult::NotNull(value) => value.unbox(),
                    };
                    if (span.len() == 0) {
                        output_data.append(*val);
                    } else {
                        // let mut result = *data_updates.at(*span.pop_front().unwrap());
                        // All reductions start from the original element value.
                        let mut result = *val;
                        if (reduction == 'add') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => { result += *data_updates[*val]; },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'mul') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => { result *= *data_updates[*val]; },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'max') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => {
                                        let holder = *data_updates[*val];
                                        if (holder > result) {
                                            result = holder;
                                        }
                                    },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'min') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => {
                                        let holder = *data_updates[*val];
                                        if (holder < result) {
                                            result = holder;
                                        }
                                    },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                    }
                }
                i += 1;
            },
            Option::None => { break; }
        };
    };
    let mut output_tensor = TensorTrait::<T>::new(*self.shape, output_data.span());
    // Undo the axis-1 transpose trick applied above.
    if transpose {
        output_tensor = output_tensor.transpose(axes: array![0, 2, 1].span())
    }
    output_tensor
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/scatter_nd.cairo | use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// Cf: TensorTrait::scatter_nd docstring
///
/// ONNX ScatterND: copies `self` and overwrites whole slices along the first
/// dimension at the rows named by `indices`, using the corresponding slices
/// of `updates`. `reduction` is a felt-encoded short string ('none' by
/// default): 'none' replaces, 'add'/'mul' combine with the original slice
/// element-wise, 'max'/'min' keep the extremum.
fn scatter_nd<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TAdd: Add<T>,
    impl TMul: Mul<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
>(
    self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<T> {
    let reduction = match reduction {
        Option::Some(val) => val,
        Option::None => 'none'
    };
    let data_rank = (*self.shape).len();
    let mut data_shape = *self.shape;
    let mut indices_shape = indices.shape;
    let updates_shape = updates.shape;
    // The last dimension of `indices` is the per-entry index tuple length.
    let indices_last_axis = indices_shape.pop_back().unwrap();
    assert(*indices_last_axis <= data_rank, 'must be <= data rank');
    let ind_max = indices.data.max().unwrap();
    // NOTE(review): bounding the max index by the rank rather than by
    // data_shape[0] looks suspicious — confirm against ONNX ScatterND.
    if (data_rank > 1) {
        assert(ind_max < data_rank, 'index is out of bound');
    }
    // Expected updates shape: indices.shape[:-1] ++ data.shape[indices_last_axis:].
    let mut batch_dims_shape = array![];
    let mut ind: usize = 0;
    loop {
        match indices_shape.pop_front() {
            Option::Some(val) => { batch_dims_shape.append(*val); },
            Option::None => { break; }
        };
    };
    let mut data_shape_clone = data_shape.clone();
    loop {
        match data_shape_clone.pop_front() {
            Option::Some(val) => {
                if (ind >= *indices_last_axis) {
                    batch_dims_shape.append(*val);
                }
            },
            Option::None => { break; }
        };
    };
    let mut ind: usize = 0;
    loop {
        match batch_dims_shape.pop_front() {
            Option::Some(val) => { assert(val == *updates_shape[ind], 'must be same'); },
            Option::None => { break; }
        };
    };
    let mut data_indices = indices.data;
    let mut data_updates = updates.data;
    let mut data_shape_clone = data_shape.clone();
    // `indexer` = number of elements in one leading-dimension slice.
    let mut indexer = 1;
    let data_shape_first = data_shape_clone.pop_front();
    if data_rank >= 1 {
        loop {
            match data_shape_clone.pop_front() {
                Option::Some(val) => { indexer *= *val; },
                Option::None => { break; }
            };
        }
    }
    // Maps target row -> (1 + position of the update slice); 0 means untouched.
    // Later duplicates overwrite earlier ones (last write wins).
    let mut updates_index_dict: Felt252Dict<u32> = Default::default();
    let mut dict_ind: usize = 1;
    loop {
        match data_indices.pop_front() {
            Option::Some(val) => {
                updates_index_dict.insert((*val).into(), dict_ind);
                dict_ind += 1;
            },
            Option::None => { break; }
        };
    };
    // Walk the output row by row, emitting either the original slice or the
    // (possibly reduced) update slice.
    let mut output_data: Array<T> = array![];
    let mut data = *self.data;
    let mut index: usize = 0;
    let mut inner_index: usize = 0;
    let num = *data_shape_first.unwrap();
    while index != num {
        let comp_index = updates_index_dict.get(index.into());
        if comp_index == 0 {
            // Row not targeted by any index: copy the original slice through.
            loop {
                if (inner_index == indexer) {
                    inner_index = 0;
                    break;
                }
                let val = *data.at((index * indexer) + inner_index);
                output_data.append(val);
                inner_index += 1;
            };
        } else {
            loop {
                if (inner_index == indexer) {
                    inner_index = 0;
                    break;
                }
                if (reduction == 'none') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    output_data.append(*val);
                }
                if (reduction == 'add') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    output_data.append(*val + data_val);
                }
                if (reduction == 'mul') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    output_data.append((*val) * data_val);
                }
                if (reduction == 'max') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    if (*val > data_val) {
                        output_data.append(*val);
                    } else {
                        output_data.append(data_val);
                    }
                }
                if (reduction == 'min') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    if (*val > data_val) {
                        output_data.append(data_val);
                    } else {
                        output_data.append(*val);
                    }
                }
                inner_index += 1;
            }
        }
        index += 1;
    };
    let mut output_tensor = TensorTrait::<T>::new(*self.shape, output_data.span());
    output_tensor
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/shrink.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::shrink docstring
///
/// Element-wise Shrink: values below -lambd are shifted up by bias, values
/// above lambd are shifted down by bias, and everything in [-lambd, lambd]
/// becomes zero. `bias` defaults to 0 and `lambd` to 0.5.
fn shrink<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut self: Tensor<T>, bias: Option<T>, lambd: Option<T>
) -> Tensor<T> {
    let bias: T = match bias {
        Option::Some(v) => v,
        Option::None => NumberTrait::zero(),
    };
    let lambd: T = match lambd {
        Option::Some(v) => v,
        Option::None => NumberTrait::half(),
    };
    let mut shrunk: Array<T> = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(item) => {
                let x = *item;
                if x < lambd.neg() {
                    shrunk.append(NumberTrait::add(x, bias));
                } else if x > lambd {
                    shrunk.append(NumberTrait::sub(x, bias));
                } else {
                    // Inside the dead zone [-lambd, lambd].
                    shrunk.append(NumberTrait::zero());
                }
            },
            Option::None => { break; }
        };
    };
    TensorTrait::new(self.shape, shrunk.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/sign.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Element-wise sign of every tensor entry; the shape is preserved.
fn sign<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl FTensor: TensorTrait<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut signs: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        signs.append((*self.data.at(i)).sign());
        i += 1;
    };
    TensorTrait::new(self.shape, signs.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/sin.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::sin docstring
///
/// Element-wise sine of every tensor entry; the shape is preserved.
fn sin<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut mapped: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        mapped.append((*self.data.at(i)).sin());
        i += 1;
    };
    TensorTrait::new(self.shape, mapped.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/sinh.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::sinh docstring
///
/// Element-wise hyperbolic sine of every tensor entry; the shape is preserved.
fn sinh<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut mapped: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        mapped.append((*self.data.at(i)).sinh());
        i += 1;
    };
    TensorTrait::new(self.shape, mapped.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/sqrt.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Element-wise square root of every tensor entry; the shape is preserved.
fn sqrt<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut roots: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        roots.append((*self.data.at(i)).sqrt());
        i += 1;
    };
    TensorTrait::new(self.shape, roots.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/tanh.cairo | use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::tanh docstring
///
/// Element-wise hyperbolic tangent of every tensor entry; the shape is preserved.
fn tanh<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut mapped: Array<T> = array![];
    let total = self.data.len();
    let mut i: usize = 0;
    while i != total {
        mapped.append((*self.data.at(i)).tanh());
        i += 1;
    };
    TensorTrait::new(self.shape, mapped.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/where.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::where docstring
///
/// Element-wise select with full three-way broadcasting:
/// out[i] = x[i] if self[i] (the condition) is true-ish, else y[i].
fn where<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TFTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>, x: @Tensor<T>, y: @Tensor<T>
) -> Tensor<T> {
    // Broadcast x with y first, then the result with the condition tensor.
    let xy_shape = broadcast_shape(*x.shape, *y.shape);
    let out_shape = broadcast_shape(*self.shape, xy_shape);
    let total = len_from_shape(out_shape);
    let mut selected: Array<T> = array![];
    let mut flat: usize = 0;
    loop {
        if flat == total {
            break;
        }
        let multi_idx = unravel_index(flat, out_shape);
        let cond_pos = broadcast_index_mapping(*self.shape, multi_idx);
        let x_pos = broadcast_index_mapping(*x.shape, multi_idx);
        let y_pos = broadcast_index_mapping(*y.shape, multi_idx);
        let picked = NumberTrait::where(
            *(*self.data)[cond_pos], *(*x.data)[x_pos], *(*y.data)[y_pos]
        );
        selected.append(picked);
        flat += 1;
    };
    TensorTrait::new(out_shape, selected.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/math/xor.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::xor docstring
///
/// Element-wise logical XOR with broadcasting; emits 1 or 0 (usize) per slot.
fn xor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<usize> = array![];
    let mut flat: usize = 0;
    loop {
        if flat == total {
            break;
        }
        let multi_idx = unravel_index(flat, out_shape);
        let lhs = *(*y.data)[broadcast_index_mapping(*y.shape, multi_idx)];
        let rhs = *(*z.data)[broadcast_index_mapping(*z.shape, multi_idx)];
        let bit = if NumberTrait::xor(lhs, rhs) {
            1
        } else {
            0
        };
        flags.append(bit);
        flat += 1;
    };
    TensorTrait::new(out_shape, flags.span())
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/ml.cairo | mod array_feature_extractor;
mod label_encoder;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/ml/array_feature_extractor.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::array_feature_extractor docstring
///
/// Gathers elements along the LAST axis of `self` at the positions listed in
/// the 1-D `indices` tensor; all leading axes are preserved in the output.
fn array_feature_extractor<
    T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    self: Tensor<T>, indices: Tensor<usize>
) -> Tensor<T> {
    assert(indices.shape.len() == 1, 'Indices must be a 1D tensor');
    // 1-D input degenerates to a plain gather over the single axis.
    if self.shape.len() == 1 {
        return process_1D_tensor(self, indices);
    }
    // N-D input: keep the leading dims, replace the last dim by indices.len().
    let (output_shape, total_elements) = calculate_output_shape::<T>(self.shape, indices);
    let output_data = calculate_output_data::<T>(self, indices, total_elements);
    TensorTrait::new(output_shape.span(), output_data.span())
}
/// 1-D gather helper for array_feature_extractor: output[i] = self[indices[i]].
fn process_1D_tensor<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    self: Tensor<T>, indices: Tensor<usize>
) -> Tensor<T> {
    let mut gathered: Array<T> = array![];
    let bound = *self.shape.at(0);
    let total = indices.data.len();
    let mut i: usize = 0;
    while i != total {
        let idx = *indices.data.at(i);
        // Reject any index past the end of the single axis.
        assert(idx < bound, 'Indices out of range');
        gathered.append(*self.data.at(idx));
        i += 1;
    };
    TensorTrait::new(indices.shape, gathered.span())
}
/// Builds the output shape for array_feature_extractor on an N-D input:
/// all leading dimensions of `input_shape` are kept and the last dimension is
/// replaced by the number of indices. Also returns the product of the leading
/// dimensions (the number of gather "rows").
///
/// NOTE(review): `input_shape.len() - 2` assumes rank >= 2; the 1-D case is
/// routed to process_1D_tensor by the caller — confirm no other call sites.
fn calculate_output_shape<
    T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    input_shape: Span<usize>, indices: Tensor<usize>
) -> (Array<usize>, usize) {
    let mut total_elements: usize = 1;
    let mut output_shape: Array<usize> = array![];
    let mut input_shape_copy = input_shape;
    let mut input_shape_counter: usize = 0;
    // Stop before the last dimension (it is replaced by indices.len() below).
    let breaker = input_shape.len() - 2;
    loop {
        match input_shape_copy.pop_front() {
            Option::Some(current_shape_value) => {
                if input_shape_counter > breaker {
                    break;
                }
                output_shape.append(*current_shape_value);
                total_elements = total_elements * *current_shape_value;
                input_shape_counter += 1;
            },
            Option::None => { break; }
        };
    };
    output_shape.append(indices.data.len());
    (output_shape, total_elements)
}
/// Gathers the output elements for array_feature_extractor on an N-D input.
/// For each of the `total_elements` rows (product of leading dims), emits
/// self[row, ..., idx] for every idx in `indices`, using strides to flatten.
fn calculate_output_data<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    self: Tensor<T>, indices: Tensor<usize>, total_elements: usize
) -> Array<T> {
    // Indices are validated against the size of the last axis.
    let last_tensor_axis: usize = *self.shape.at(self.shape.len() - 1);
    let mut output_data: Array<T> = array![];
    let strides: Span<usize> = TensorTrait::stride(@self);
    let mut element_counter: usize = 0;
    // stride_l2 steps between rows; stride_l1 steps within the last axis.
    let mut stride_l2 = *strides.at(strides.len() - 2);
    let mut stride_l1 = *strides.at(strides.len() - 1);
    while element_counter != total_elements {
        let mut base_index = if strides.len() > 1 {
            element_counter * stride_l2
        } else {
            0
        };
        let mut indices_values = indices.data;
        loop {
            match indices_values.pop_front() {
                Option::Some(current_indices_value) => {
                    assert(*current_indices_value < last_tensor_axis, 'Indices out of range');
                    let mut flat_index = base_index + *current_indices_value * (stride_l1);
                    let mut current_data_value = *self.data.at(flat_index);
                    output_data.append(current_data_value);
                },
                Option::None => { break; }
            };
        };
        element_counter += 1;
    };
    output_data
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/ml/label_encoder.cairo | use core::array::ArrayTrait;
use core::option::OptionTrait;
use core::array::SpanTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use core::dict::Felt252DictTrait;
use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
use core::debug::PrintTrait;
use core::traits::Into;
use core::traits::TryInto;
/// Cf: TensorTrait::label_encoder docstring
///
/// Maps each element of `self` through a key -> value table: elements equal
/// to keys[i] become values[i]; anything not present in the table becomes the
/// first element of the default list/tensor. Keys, values, and the default
/// may each be supplied either as a span or as a tensor (exactly one of the
/// pair must be Some, otherwise this panics with 'None').
fn label_encoder<
    T, +Drop<T>, +Copy<T>, +AddEq<T>, +TensorTrait<T>, +PartialOrd<T>, +Into<T, felt252>,
>(
    // self: @Tensor<T>, default: T, keys: Array<T>, values: Array<T>
    self: @Tensor<T>,
    default_list: Option<Span<T>>,
    default_tensor: Option<Tensor<T>>,
    keys: Option<Span<T>>,
    keys_tensor: Option<Tensor<T>>,
    values: Option<Span<T>>,
    values_tensor: Option<Tensor<T>>,
) -> Tensor<T> {
    // Resolve the default: prefer the span form, fall back to the tensor form.
    let mut default = match default_list {
        Option::Some(value) => value,
        Option::None => {
            match default_tensor {
                Option::Some(value) => value.data,
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    // Only the first default element is used.
    let default = match default.pop_front() {
        Option::Some(value) => *value,
        Option::None => { core::panic_with_felt252('None') }
    };
    let mut keys = match keys {
        Option::Some(value) => { value },
        Option::None => {
            match keys_tensor {
                Option::Some(value) => { value.data },
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    let mut values = match values {
        Option::Some(value) => { value },
        Option::None => {
            match values_tensor {
                Option::Some(value) => { value.data },
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    assert(keys.len() == values.len(), 'keys must be eq to values');
    // Build the lookup table, keyed by the felt252 image of each key.
    // Duplicate keys resolve to the LAST occurrence (later inserts win).
    let mut key_value_dict: Felt252Dict<Nullable<T>> = Default::default();
    let mut output_data = ArrayTrait::<T>::new();
    loop {
        let key = match keys.pop_front() {
            Option::Some(key) => key,
            Option::None => { break; }
        };
        let value = match values.pop_front() {
            Option::Some(value) => value,
            Option::None => { break; }
        };
        key_value_dict.insert((*key).into(), nullable_from_box(BoxTrait::new(*value)));
    };
    // Translate every element; missing keys map to the default.
    let mut data = *self.data;
    loop {
        match data.pop_front() {
            Option::Some(val) => {
                let value = *val;
                let res = key_value_dict.get(value.into());
                // Despite the name, `span` holds a single T (mapped or default).
                let mut span = match match_nullable(res) {
                    FromNullableResult::Null => default,
                    FromNullableResult::NotNull(res) => res.unbox(),
                };
                output_data.append(span);
            },
            Option::None => { break; }
        };
    };
    let mut output_tensor = TensorTrait::<T>::new(*self.shape, output_data.span());
    return output_tensor;
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization.cairo | mod quantize_linear;
mod dynamic_quantize_linear;
mod dequantize_linear;
mod qlinear_matmul;
mod qlinear_concat;
mod qlinear_add;
mod qlinear_mul;
mod qlinear_leakyrelu;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/dequantize_linear.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::check_compatibility;
use orion::utils::saturate;
/// Cf: PerformanceTrait::dequantize_linear docstring
///
/// Dequantizes `x` back to T: y = (x - x_zero_point) * x_scale.
/// Scalar scale/zero-point (length-1 tensors) take a fast element-wise path;
/// otherwise a broadcast-compatible per-axis dequantization is applied.
fn dequantize_linear<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    x: @Tensor<Q>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T> {
    if (*x_scale.data).len() == 1 && (*x_zero_point.data).len() == 1 {
        // Scalar quantization parameters: apply them to every element.
        dequantize_element_wise(*x, *x_scale.data[0], *x_zero_point.data[0])
    } else {
        // Per-axis parameters: shapes must broadcast against x and each other.
        check_compatibility(*x.shape, *x_scale.shape);
        check_compatibility(*x.shape, *x_zero_point.shape);
        check_compatibility(*x_scale.shape, *x_zero_point.shape);
        dequantize_per_axis(@(*x).into(), x_scale, x_zero_point)
    }
}
/// Broadcasted dequantization: y = (x - x_zero_point) * x_scale.
fn dequantize_per_axis<
    T,
    impl TTensor: TensorTrait<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T> {
    let shifted = *x - *x_zero_point;
    shifted * *x_scale
}
/// Dequantizes every element of `x` with one shared scale and zero point.
fn dequantize_element_wise<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    mut x: Tensor::<Q>, x_scale: T, x_zero_point: T
) -> Tensor::<T> {
    let mut widened: Array<T> = array![];
    let total = x.data.len();
    let mut i: usize = 0;
    while i != total {
        widened.append(dequantize(*x.data.at(i), x_scale, x_zero_point));
        i += 1;
    };
    TensorTrait::new(x.shape, widened.span())
}
/// Scalar dequantization: (x - zero_point) * scale, after widening x to T.
fn dequantize<
    Q, T, impl QIntoT: Into<Q, T>, impl TSub: Sub<T>, impl TMul: Mul<T>, impl TDrop: Drop<T>
>(
    x: Q, x_scale: T, x_zero_point: T
) -> T {
    let widened: T = x.into();
    (widened - x_zero_point) * x_scale
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/dynamic_quantize_linear.cairo | use orion::numbers::NumberTrait;
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::utils::saturate;
/// ONNX DynamicQuantizeLinear: derives (scale, zero_point) from the data
/// range of `x` — widened so it always contains zero — then quantizes `x`
/// into [min, max] (the quantized type's range, passed as T).
///
/// Returns (quantized tensor, scalar scale tensor, scalar zero-point tensor).
fn dynamic_quantize_linear<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, min: T, max: T, zero: T, one: T
) -> (Tensor<Q>, Tensor<T>, Tensor<T>) {
    // y_scale = (maximum(0, max(x)) - minimum(0, min(x))) / (qmax - qmin)
    // Widening the range to include zero keeps zero exactly representable.
    let mut x_max: T = x.max_in_tensor();
    let mut x_min: T = x.min_in_tensor();
    if x_max < zero {
        x_max = zero;
    }
    if x_min > zero {
        x_min = zero
    }
    // scale = max == min ? 1.0f : (max - min) / float(qmax - qmin);
    let mut y_scale_values = ArrayTrait::new();
    let y_scale_value: T = (x_max - x_min) / (max - min);
    if x_max == x_min {
        y_scale_values.append(one);
    } else {
        y_scale_values.append(y_scale_value);
    }
    let mut y_scale_tensor_shape: Array<u32> = array![];
    y_scale_tensor_shape.append(y_scale_values.len());
    let y_scale = TensorTrait::<
        T
    >::new(shape: y_scale_tensor_shape.span(), data: y_scale_values.span(),);
    // intermediate_zero_point = qmin - min(x)/y_scale
    let intermediate_zero_point: T = min - x_min / y_scale_value;
    // y_zero_point = cast(round(saturate(intermediate_zero_point)))
    let mut y_zero_point_value: T = saturate(min, max, intermediate_zero_point);
    // Build the scalar zero-point tensor once (the previous version built the
    // value array twice; a single array is sufficient — len() only snapshots).
    let mut y_zero_point_values: Array<T> = array![];
    y_zero_point_values.append(y_zero_point_value);
    let mut y_zero_point_tensor_shape: Array<u32> = array![];
    y_zero_point_tensor_shape.append(y_zero_point_values.len());
    let mut y_zero_point = TensorTrait::<
        T
    >::new(shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(),);
    // y_zero_point = y_zero_point.round(); // tensor<FP> only supported!
    // y = saturate (round (x / y_scale) + y_zero_point)
    (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/qlinear_add.cairo | use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Quantized addition: dequantize both operands into T with their own
/// (scale, zero_point), add, then requantize the sum with
/// (y_scale, y_zero_point), saturating to [min, max].
fn qlinear_add<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let lhs = dequantize_linear(@(*a), a_scale, a_zero_point);
    let rhs = dequantize_linear(@(*b), b_scale, b_zero_point);
    let sum: Tensor<T> = (lhs + rhs).into();
    quantize_linear(@sum, y_scale, y_zero_point, min, max)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/qlinear_concat.cairo | use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::math::concat::{
validate_shapes, compute_output_size, concatenate_data
};
/// Quantized concatenation along `axis`: each input tensor is dequantized
/// with its own (scale, zero_point), the results are concatenated in T, and
/// the concatenation is requantized with (y_scale, y_zero_point), saturating
/// to [min, max].
fn qlinear_concat<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    axis: usize,
    min: T,
    max: T
) -> Tensor<Q> {
    assert(tensors.len() == scales.len(), 'Each Tensors must have a scale');
    // Fixed: this assert previously reused the scale error message.
    assert(tensors.len() == zero_points.len(), 'Each tensor needs a zero point');
    // Dequantize each input with its own parameters and concatenate in T.
    let mut x = concat_dequantize(tensors, scales, zero_points, axis, min, max);
    quantize_linear(@x, y_scale, y_zero_point, min, max)
}
/// Dequantizes each input tensor with its matching scale/zero-point and
/// concatenates the results along `axis`.
/// `min`/`max` are accepted for interface parity with the quantized entry
/// points but are not used by dequantization itself.
fn concat_dequantize<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    axis: usize,
    min: T,
    max: T
) -> Tensor<T> {
    assert(tensors.len() >= 2, 'Input tensors must be > 1');

    let reference_shape = (*tensors.at(0)).shape;
    assert(reference_shape.len() > axis, 'Out of bounds for dimension');

    // Every input must agree with the reference shape outside `axis`.
    validate_shapes(tensors, reference_shape, axis);

    // Output shape: reference shape with `axis` dimension summed over inputs.
    let out_shape = compute_output_size(reference_shape, tensors, axis);

    let dequantized = dequantize_tensors(tensors, scales, zero_points, min, max);

    let out_data: Array<T> = concatenate_data(dequantized, axis, reference_shape);

    TensorTrait::<T>::new(out_shape.span(), out_data.span())
}
/// Dequantizes `tensors[i]` with `scales[i]` / `zero_points[i]` for every i.
/// Panics (via `Span::at`) if `scales` or `zero_points` is shorter than
/// `tensors`. `min`/`max` are unused here; kept for interface parity.
fn dequantize_tensors<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    min: T,
    max: T
) -> Span<Tensor<T>> {
    let mut dequantized: Array<Tensor<T>> = array![];
    let mut i = 0;
    while i != tensors.len() {
        dequantized
            .append(dequantize_linear(tensors.at(i), scales.at(i), zero_points.at(i)));
        i += 1;
    };

    dequantized.span()
}
/// # tensor.concat
///
/// ```rust
/// fn concat(tensors: Span<Tensor<T>>, axis: usize, ) -> Tensor<T>;
/// ```
///
/// Concatenate a list of tensors into a single tensor.
///
/// ## Args
///
/// * `tensors`(` Span<Tensor<T>>,`) - Array of the input tensors.
/// * `axis`(`usize`) - Axis to concat on.
///
/// ## Panics
///
/// * Panic if tensor length is not greater than 1.
/// * Panics if dimension is not greater than axis.
///
/// ## Returns
///
/// A new `Tensor<T>` concatenated tensor of the input tensors.
///
/// ## Example
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn concat_example() -> Tensor<u32> {
/// let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span(),);
/// let result = TensorTrait::concat(tensors: array![tensor1, tensor2].span(), axis: 0);
/// return result;
/// }
/// >>> [[0. 1.]
/// [2. 3.],
/// [0. 1.]
/// [2. 3.]]
///
/// result.shape
/// >>> (4, 2)
///
/// let result = TensorTrait::concat(tensors: array![tensor1, tensor2].span(), axis: 1);
/// return result;
/// >>> [[0. 1., 0., 1.]
/// [2. 3., 2., 3.]]
///
/// result.shape
/// >>> (2, 4 )
/// ```
///
///fn concat(tensors: Span<Tensor<T>>, axis: usize,) -> Tensor<T>;
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/qlinear_leakyrelu.cairo | use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Quantized LeakyReLU: dequantizes `a`, scales each negative element by
/// `alpha` (non-negative elements pass through), then re-quantizes with the
/// same (`a_scale`, `a_zero_point`), saturating to [`min`, `max`].
fn qlinear_leakyrelu<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    a: @Tensor<Q>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, alpha: T, min: T, max: T
) -> Tensor<Q> {
    let mut dequantized = dequantize_linear(@(*a), a_scale, a_zero_point);

    let mut activated: Array<T> = array![];
    loop {
        match dequantized.data.pop_front() {
            Option::Some(item) => {
                let value = *item;
                let out = if value < NumberTrait::zero() {
                    value * alpha
                } else {
                    value
                };
                activated.append(out);
            },
            Option::None => { break; }
        };
    };

    quantize_linear(
        @TensorTrait::new(dequantized.shape, activated.span()), a_scale, a_zero_point, min, max
    )
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/qlinear_matmul.cairo | use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::qlinear_matmul docstring
/// Quantized matmul: dequantizes `a` and `b`, multiplies them, then
/// re-quantizes with (`y_scale`, `y_zero_point`), saturating to [min, max].
///
/// Inputs of rank <= 2 are delegated to `TensorTrait::matmul`. Higher-rank
/// inputs must share the same rank and are treated as stacks of 2D matrices:
/// (D1, ..., M, K) x (D1, ..., K, N) -> (D1, ..., M, N).
fn qlinear_matmul<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    //impl TTensorTryInto: TryInto<Tensor<T>, Tensor<Q>>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let a_shape = *a.shape;
    let b_shape = *b.shape;
    let a_ndim = (a_shape).len();
    let b_ndim = (b_shape).len();
    //! Case: Both tensors are max 2-dimensional
    if a_ndim <= 2 && b_ndim <= 2 {
        let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point);
        let mut dequantized_b = dequantize_linear(@(*b), b_scale, b_zero_point);
        let mut x = dequantized_a.matmul(@dequantized_b);
        return quantize_linear(@x, y_scale, y_zero_point, min, max);
    }
    // (D1, D2, M, K) * (D1, D2, K, N) -> (D1, D2, M, N)
    assert(a_ndim == b_ndim, 'dim missmatch');
    let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point);
    let mut dequantized_b = dequantize_linear(@(*b), b_scale, b_zero_point);
    let mut x_shape: Array<usize> = array![];
    let mut x_data: Array<T> = array![];
    // Inner dimensions must agree: last dim of `a` == second-to-last of `b`.
    assert(a_shape[a_ndim - 1] == b_shape[b_ndim - 2], 'incompatible dim for matmul');
    let m = *a_shape[a_ndim - 2];
    let k = *a_shape[a_ndim - 1];
    let n = *b_shape[b_ndim - 1];
    // 2D shapes used to slice each (M, K) / (K, N) sub-matrix out of the stack.
    let mut a_shape_reduced: Array<usize> = array![];
    a_shape_reduced.append(m);
    a_shape_reduced.append(k);
    let mut b_shape_reduced: Array<usize> = array![];
    b_shape_reduced.append(k);
    b_shape_reduced.append(n);
    // Number of stacked matrices = total element count / elements per matrix.
    let mut i = 0;
    while i != stride(a_shape) / (m * k) {
        // Multiply the i-th (M, K) and (K, N) sub-matrices, appending into x_data.
        result_updates(
            @subtensor(@dequantized_a, i * (m * k), a_shape_reduced.span()),
            @subtensor(@dequantized_b, i * (k * n), b_shape_reduced.span()),
            ref x_data
        );
        i += 1;
    };
    // Output shape: batch dims of `a` followed by (M, N).
    x_shape(ref x_shape, a_shape, m, n);
    let x = TensorTrait::new(x_shape.span(), x_data.span());
    quantize_linear(@x, y_scale, y_zero_point, min, max)
}
/// Builds the output shape for a stacked matmul: copies all but the last two
/// dimensions of `shape` into `x_data`, then appends `m` and `n`.
fn x_shape(ref x_data: Array<usize>, mut shape: Span<usize>, m: usize, n: usize) {
    loop {
        if shape.len() == 2 {
            break;
        }
        match shape.pop_front() {
            Option::Some(dim) => { x_data.append(*dim); },
            Option::None => { break; }
        };
    };

    x_data.append(m);
    x_data.append(n);
}
/// Returns the product of all dimensions in `shape`, i.e. the number of
/// elements in a tensor of that shape. Panics on an empty shape.
fn stride(mut shape: Span<usize>) -> usize {
    assert(shape.len() > 0, 'shape cannot be empty');

    let mut product: usize = 1;
    loop {
        match shape.pop_front() {
            Option::Some(dim) => { product *= *dim; },
            Option::None => { break; }
        };
    };

    product
}
/// Copies `stride(shape)` consecutive elements of `x.data` beginning at
/// `start` into a fresh tensor with the given `shape`.
fn subtensor<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    x: @Tensor<T>, start: usize, shape: Span<usize>
) -> Tensor::<T> {
    let count = stride(shape);

    let mut data: Array<T> = array![];
    let mut offset = 0;
    while offset != count {
        data.append(*x.data[start + offset]);
        offset += 1;
    };

    TensorTrait::new(shape, data.span())
}
/// Multiplies two 2D matrices `mat1` (m x n) and `mat2` (n x p) and appends
/// the m*p products row-major into `result_data`.
///
/// The caller is responsible for ensuring the inner dimensions agree; a
/// mismatch panics via out-of-bounds indexing into the data spans.
fn result_updates<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mat1: @Tensor<T>, mat2: @Tensor<T>, ref result_data: Array<T>
) {
    let m = *mat1.shape[0];
    let n = *mat1.shape[1];
    let p = *mat2.shape[1];

    let mat1 = *mat1.data;
    let mat2 = *mat2.data;

    // Standard triple loop. (The unused `result_shape` local from the
    // original implementation was dead code and has been removed.)
    let mut i = 0_usize;
    while i != m {
        let mut j = 0_usize;
        while j != p {
            let mut sum: T = NumberTrait::zero();
            let mut k = 0_usize;
            while k != n {
                let mat1_index = i * n + k;
                let mat2_index = k * p + j;
                sum += *mat1[mat1_index] * *mat2[mat2_index];

                k += 1;
            };

            result_data.append(sum);
            j += 1;
        };

        i += 1;
    };
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/qlinear_mul.cairo | use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Quantized element-wise multiplication: dequantizes `a` and `b` with their
/// own scale/zero-point pairs, multiplies (with broadcasting), then
/// re-quantizes with (`y_scale`, `y_zero_point`), saturating to [min, max].
fn qlinear_mul<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let lhs = dequantize_linear(@(*a), a_scale, a_zero_point);
    let rhs = dequantize_linear(@(*b), b_scale, b_zero_point);

    let product: Tensor<T> = lhs * rhs;

    quantize_linear(@product, y_scale, y_zero_point, min, max)
}
| https://github.com/gizatechxyz/orion |
src/operators/tensor/quantization/quantize_linear.cairo | use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::check_compatibility;
use orion::operators::tensor::math::arithmetic::saturated_add;
use orion::utils::saturate;
/// Cf: TensorTrait::quantize_linear docstring
fn quantize_linear<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TDiv: Div<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>, min: T, max: T
) -> Tensor::<Q> {
    // Scalar scale + scalar zero point -> uniform per-tensor quantization.
    if (*y_scale.data).len() == 1 && (*y_zero_point.data).len() == 1 {
        return quantize_element_wise(*x, *y_scale.data[0], *y_zero_point.data[0], min, max);
    }

    // Per-axis path: every pair of shapes must be broadcast-compatible.
    check_compatibility(*x.shape, *y_scale.shape);
    check_compatibility(*x.shape, *y_zero_point.shape);
    check_compatibility(*y_scale.shape, *y_zero_point.shape);

    quantize_per_axis(x, y_scale, y_zero_point, min, max)
}
/// Per-axis quantization: broadcast-divides `x` by `y_scale`, then adds
/// `y_zero_point` with saturation to [min, max] before narrowing to Q.
fn quantize_per_axis<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>, min: T, max: T
) -> Tensor::<Q> {
    let scaled = *x / *y_scale;
    saturated_add(@scaled, y_zero_point, min, max)
}
/// Quantizes every element of `x` with a single scalar scale and zero point,
/// saturating each result to [min, max].
fn quantize_element_wise<
    T,
    Q,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
    impl TPartialOrd: PartialOrd<T>,
    impl TDiv: Div<T>,
    impl TAdd: Add<T>,
    impl TTryIntoQ: TryInto<T, Q>,
    impl QTensor: TensorTrait<Q>
>(
    mut x: Tensor::<T>, y_scale: T, y_zero_point: T, min: T, max: T
) -> Tensor::<Q> {
    let mut quantized: Array<Q> = array![];
    loop {
        match x.data.pop_front() {
            Option::Some(value) => {
                quantized.append(quantize(*value, y_scale, y_zero_point, min, max));
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(x.shape, quantized.span())
}
/// Quantizes one value: `x / y_scale + y_zero_point`, saturated to
/// [min, max], then narrowed to Q (the narrowing cannot fail after
/// saturation, hence the unwrap).
fn quantize<
    T,
    Q,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TDiv: Div<T>,
    impl TAdd: Add<T>,
    impl TTryIntoQ: TryInto<T, Q>
>(
    x: T, y_scale: T, y_zero_point: T, min: T, max: T
) -> Q {
    let shifted = (x / y_scale) + y_zero_point;
    saturate(min, max, shifted).try_into().unwrap()
}
| https://github.com/gizatechxyz/orion |
src/operators/vec.cairo | use core::box::BoxTrait;
use core::traits::Into;
use core::nullable::{Nullable, match_nullable, FromNullableResult, nullable_from_box};
use alexandria_data_structures::vec::{VecTrait};
use orion::numbers::NumberTrait;
/// A growable vector backed by a `Felt252Dict`. Slots that were never written
/// read back as `NumberTrait::zero()` (see `NullableVecImpl::get`).
struct NullableVec<T> {
    // Sparse storage: index -> nullable value.
    items: Felt252Dict<Nullable<T>>,
    // Logical length; indices < len may still be unset (Null) after a sparse `set`.
    len: usize,
}
/// `Felt252Dict` must be squashed before it goes out of scope; this impl
/// performs the squash when the vec is destructed.
impl DestructNullableVec<T, impl TDrop: Drop<T>> of Destruct<NullableVec<T>> {
    fn destruct(self: NullableVec<T>) nopanic {
        self.items.squash();
    }
}
impl NullableVecImpl<
    T, MAG, impl TDrop: Drop<T>, impl TCopy: Copy<T>, +NumberTrait<T, MAG>
> of VecTrait<NullableVec<T>, T> {
    // Empty vec with no backing entries.
    fn new() -> NullableVec<T> {
        NullableVec { items: Default::default(), len: 0 }
    }
    /// Returns the element at `index`, or `None` if `index >= len`.
    /// Unwritten in-bounds slots (possible after a sparse `set`) read back
    /// as `NumberTrait::zero()`.
    fn get(ref self: NullableVec<T>, index: usize) -> Option<T> {
        if (index < self.len()) {
            return match match_nullable(self.items.get(index.into())) {
                FromNullableResult::Null(()) => { Option::Some(NumberTrait::zero()) },
                FromNullableResult::NotNull(val) => { Option::Some(val.unbox()) },
            };
        } else {
            Option::<T>::None
        }
    }
    /// Like `get`, but panics on out-of-bounds access. The `None` arm is
    /// unreachable after the bounds assert; zero is returned to satisfy the match.
    fn at(ref self: NullableVec<T>, index: usize) -> T {
        assert(index < self.len(), 'Index out of bounds')
        return match self.get(index) {
            Option::Some(val) => val,
            Option::None => NumberTrait::zero(),
        };
    }
    /// Appends `value` at position `len`. Uses a wrapping add for the length
    /// bump; overflow would require 2^32 pushes.
    fn push(ref self: NullableVec<T>, value: T) -> () {
        self.items.insert(self.len.into(), nullable_from_box(BoxTrait::new(value)))
        self.len = core::integer::u32_wrapping_add(self.len, 1_usize);
    }
    /// Writes `value` at `index`, growing `len` to `index + 1` when writing
    /// past the end — intermediate slots stay unset and read as zero.
    fn set(ref self: NullableVec<T>, index: usize, value: T) {
        if index >= self.len() {
            self.len = index + 1;
        }
        self.items.insert(index.into(), nullable_from_box(BoxTrait::new(value)))
    }
    fn len(self: @NullableVec<T>) -> usize {
        *self.len
    }
}
| https://github.com/gizatechxyz/orion |
src/test_helper.cairo | mod tensor;
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor.cairo | mod fixed_point;
mod i32;
mod i8;
mod u32;
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/fixed_point.cairo | mod fp8x23;
mod fp16x16;
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/fixed_point/fp16x16.cairo | use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
// 1D
/// Shape [3] tensor holding 0, 1, 2 as FP16x16.
fn fp_tensor_1x3_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3].span(), data.span())
}
/// Shape [3] tensor holding 0, -1, -2 as FP16x16.
fn fp_tensor_1x3_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3].span(), data.span())
}
// 2D
/// Shape [2, 2] tensor holding 0..3 as FP16x16.
fn fp_tensor_2x2_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 4 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 2].span(), data.span())
}
/// Shape [2, 2] tensor holding 0, -1, -2, -3 as FP16x16.
fn fp_tensor_2x2_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 4 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 2].span(), data.span())
}
/// Shape [3, 3] tensor holding 0..8 as FP16x16.
fn fp_tensor_3x3_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 9 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 3].span(), data.span())
}
/// Shape [3, 3] tensor holding 0, -1, ..., -8 as FP16x16.
fn fp_tensor_3x3_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 9 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 3].span(), data.span())
}
/// Shape [3, 2] tensor holding 0..5 as FP16x16.
fn fp_tensor_3x2_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 2].span(), data.span())
}
/// Shape [3, 2] tensor holding 0, -1, ..., -5 as FP16x16.
fn fp_tensor_3x2_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 2].span(), data.span())
}
/// Shape [3, 1] tensor holding 0, 1, 2 as FP16x16.
fn fp_tensor_3x1_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 1].span(), data.span())
}
/// Shape [3, 1] tensor holding 0, -1, -2 as FP16x16.
fn fp_tensor_3x1_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 1].span(), data.span())
}
/// Shape [2, 3] tensor holding 0..5 as FP16x16.
fn fp_tensor_2x3_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 3].span(), data.span())
}
/// Shape [2, 3] tensor holding 0, -1, ..., -5 as FP16x16.
fn fp_tensor_2x3_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 3].span(), data.span())
}
// 3D
/// Shape [2, 2, 2] tensor holding 0..7 as FP16x16.
fn fp_tensor_2x2x2_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 8 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 2, 2].span(), data.span())
}
/// Shape [2, 2, 2] tensor holding 0, -1, ..., -7 as FP16x16.
fn fp_tensor_2x2x2_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 8 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![2, 2, 2].span(), data.span())
}
/// Shape [3, 2, 2] tensor holding 0..11 as FP16x16.
fn fp_tensor_3x2x2_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 12 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 2, 2].span(), data.span())
}
/// Shape [3, 2, 2] tensor holding 0, -1, ..., -11 as FP16x16.
fn fp_tensor_3x2x2_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 12 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 2, 2].span(), data.span())
}
/// Shape [3, 3, 3] tensor holding 0..26 as FP16x16.
fn fp_tensor_3x3x3_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 27 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 3, 3].span(), data.span())
}
/// Shape [3, 3, 3] tensor holding 0, -1, ..., -26 as FP16x16.
fn fp_tensor_3x3x3_neg_helper() -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != 27 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(array![3, 3, 3].span(), data.span())
}
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/fixed_point/fp8x23.cairo | use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
// 1D
/// Shape [3] tensor holding 0, 1, 2 as FP8x23.
fn fp_tensor_1x3_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3].span(), data.span())
}
/// Shape [3] tensor holding 0, -1, -2 as FP8x23.
fn fp_tensor_1x3_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3].span(), data.span())
}
// 2D
/// Shape [2, 2] tensor holding 0..3 as FP8x23.
fn fp_tensor_2x2_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 4 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 2].span(), data.span())
}
/// Shape [2, 2] tensor holding 0, -1, -2, -3 as FP8x23.
fn fp_tensor_2x2_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 4 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 2].span(), data.span())
}
/// Shape [3, 3] tensor holding 0..8 as FP8x23.
fn fp_tensor_3x3_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 9 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 3].span(), data.span())
}
/// Shape [3, 3] tensor holding 0, -1, ..., -8 as FP8x23.
fn fp_tensor_3x3_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 9 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 3].span(), data.span())
}
/// Shape [3, 2] tensor holding 0..5 as FP8x23.
fn fp_tensor_3x2_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 2].span(), data.span())
}
/// Shape [3, 2] tensor holding 0, -1, ..., -5 as FP8x23.
fn fp_tensor_3x2_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 2].span(), data.span())
}
/// Shape [3, 1] tensor holding 0, 1, 2 as FP8x23.
fn fp_tensor_3x1_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 1].span(), data.span())
}
/// Shape [3, 1] tensor holding 0, -1, -2 as FP8x23.
fn fp_tensor_3x1_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 3 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 1].span(), data.span())
}
/// Shape [2, 3] tensor holding 0..5 as FP8x23.
fn fp_tensor_2x3_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 3].span(), data.span())
}
/// Shape [2, 3] tensor holding 0, -1, ..., -5 as FP8x23.
fn fp_tensor_2x3_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 6 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 3].span(), data.span())
}
// 3D
/// Shape [2, 2, 2] tensor holding 0..7 as FP8x23.
fn fp_tensor_2x2x2_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 8 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 2, 2].span(), data.span())
}
/// Shape [2, 2, 2] tensor holding 0, -1, ..., -7 as FP8x23.
fn fp_tensor_2x2x2_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 8 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![2, 2, 2].span(), data.span())
}
/// Shape [3, 2, 2] tensor holding 0..11 as FP8x23.
fn fp_tensor_3x2x2_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 12 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 2, 2].span(), data.span())
}
/// Shape [3, 2, 2] tensor holding 0, -1, ..., -11 as FP8x23.
fn fp_tensor_3x2x2_neg_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 12 {
        data.append(FixedTrait::new_unscaled(i, i != 0));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 2, 2].span(), data.span())
}
/// Shape [3, 3, 3] tensor holding 0..26 as FP8x23.
fn fp_tensor_3x3x3_helper() -> Tensor<FP8x23> {
    let mut data: Array<FP8x23> = array![];
    let mut i: u32 = 0;
    while i != 27 {
        data.append(FixedTrait::new_unscaled(i, false));
        i += 1;
    };
    TensorTrait::<FP8x23>::new(array![3, 3, 3].span(), data.span())
}
// 3x3x3 FP8x23 tensor holding 0, -1, ..., -26 (zero keeps a positive sign bit).
fn fp_tensor_3x3x3_neg_helper() -> Tensor<FP8x23> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<FP8x23> = array![FixedTrait::new_unscaled(0, false)];
    let mut mag: u32 = 1;
    while mag != 27 {
        data.append(FixedTrait::new_unscaled(mag, true));
        mag += 1;
    };
    TensorTrait::<FP8x23>::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/i32.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I32Tensor;
// 1D
// 1-D i32 tensor [0, 1, 2].
fn i32_tensor_1x3_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3];
    let data: Array<i32> = array![0, 1, 2];
    TensorTrait::new(shape.span(), data.span())
}
// 1-D i32 tensor [0, -1, -2].
fn i32_tensor_1x3_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3];
    let data: Array<i32> = array![0, -1, -2];
    TensorTrait::new(shape.span(), data.span())
}
// 2D
// 2x2 i32 matrix [[0, 1], [2, 3]].
fn i32_tensor_2x2_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 2];
    let data: Array<i32> = array![0, 1, 2, 3];
    TensorTrait::new(shape.span(), data.span())
}
// 2x2 i32 matrix with all nonzero entries negated.
fn i32_tensor_2x2_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 2];
    let data: Array<i32> = array![0, -1, -2, -3];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3 i32 matrix holding 0..8 row-major.
fn i32_tensor_3x3_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 3];
    let data: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3 i32 matrix holding 0, -1, ..., -8 row-major.
fn i32_tensor_3x3_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 3];
    let data: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7, -8];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2 i32 matrix holding 0..5 row-major.
fn i32_tensor_3x2_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 2];
    let data: Array<i32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2 i32 matrix holding 0, -1, ..., -5 row-major.
fn i32_tensor_3x2_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 2];
    let data: Array<i32> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), data.span())
}
// 3x1 i32 column vector [0, 1, 2].
fn i32_tensor_3x1_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 1];
    let data: Array<i32> = array![0, 1, 2];
    TensorTrait::new(shape.span(), data.span())
}
// 3x1 i32 column vector [0, -1, -2].
fn i32_tensor_3x1_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 1];
    let data: Array<i32> = array![0, -1, -2];
    TensorTrait::new(shape.span(), data.span())
}
// 2x3 i32 matrix holding 0..5 row-major.
fn i32_tensor_2x3_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 3];
    let data: Array<i32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), data.span())
}
// 2x3 i32 matrix holding 0, -1, ..., -5 row-major.
fn i32_tensor_2x3_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 3];
    let data: Array<i32> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), data.span())
}
// 3D
// 2x2x2 i32 tensor holding 0..7 row-major.
fn i32_tensor_2x2x2_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 2, 2];
    let data: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::new(shape.span(), data.span())
}
// 2x2x2 i32 tensor holding 0, -1, ..., -7 row-major.
fn i32_tensor_2x2x2_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![2, 2, 2];
    let data: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2x2 i32 tensor holding 0..11 row-major.
fn i32_tensor_3x2x2_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 2, 2];
    let data: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2x2 i32 tensor holding 0, -1, ..., -11 row-major.
fn i32_tensor_3x2x2_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 2, 2];
    let data: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3x3 i32 tensor holding 0..26, generated instead of spelled out.
fn i32_tensor_3x3x3_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<i32> = array![];
    let mut v: i32 = 0;
    while v != 27 {
        data.append(v);
        v += 1;
    };
    TensorTrait::new(shape.span(), data.span())
}
// 3x3x3 i32 tensor holding 0, -1, ..., -26, generated instead of spelled out.
fn i32_tensor_3x3x3_neg_helper() -> Tensor<i32> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<i32> = array![];
    let mut v: i32 = 0;
    while v != 27 {
        data.append(-v);
        v += 1;
    };
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/i8.cairo | use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I8Tensor;
// 1D
// 1-D i8 tensor [0, 1, 2].
fn i8_tensor_1x3_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3];
    let data: Array<i8> = array![0, 1, 2];
    TensorTrait::new(shape.span(), data.span())
}
/// 1-D i8 tensor [0, -1, -2].
///
/// Fixed: the last element was `2` (positive), which broke the negated-sequence
/// pattern that every other `*_neg_helper` in the i32/i8/u32/FP helper files
/// follows (all nonzero entries negated). NOTE(review): if an existing test
/// asserted against the old `2`, it must be updated alongside this fix.
fn i8_tensor_1x3_neg_helper() -> Tensor<i8> {
    let mut sizes: Array<u32> = array![3];
    let mut data: Array<i8> = array![0, -1, -2];
    let tensor = TensorTrait::new(sizes.span(), data.span());
    tensor
}
// 2D
// 2x2 i8 matrix [[0, 1], [2, 3]].
fn i8_tensor_2x2_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 2];
    let data: Array<i8> = array![0, 1, 2, 3];
    TensorTrait::new(shape.span(), data.span())
}
// 2x2 i8 matrix with all nonzero entries negated.
fn i8_tensor_2x2_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 2];
    let data: Array<i8> = array![0, -1, -2, -3];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3 i8 matrix holding 0..8 row-major.
fn i8_tensor_3x3_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 3];
    let data: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3 i8 matrix holding 0, -1, ..., -8 row-major.
fn i8_tensor_3x3_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 3];
    let data: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7, -8];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2 i8 matrix holding 0..5 row-major.
fn i8_tensor_3x2_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 2];
    let data: Array<i8> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2 i8 matrix holding 0, -1, ..., -5 row-major.
fn i8_tensor_3x2_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 2];
    let data: Array<i8> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), data.span())
}
// 3x1 i8 column vector [0, 1, 2].
fn i8_tensor_3x1_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 1];
    let data: Array<i8> = array![0, 1, 2];
    TensorTrait::new(shape.span(), data.span())
}
// 3x1 i8 column vector [0, -1, -2].
fn i8_tensor_3x1_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 1];
    let data: Array<i8> = array![0, -1, -2];
    TensorTrait::new(shape.span(), data.span())
}
// 2x3 i8 matrix holding 0..5 row-major.
fn i8_tensor_2x3_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 3];
    let data: Array<i8> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), data.span())
}
// 2x3 i8 matrix holding 0, -1, ..., -5 row-major.
fn i8_tensor_2x3_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 3];
    let data: Array<i8> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), data.span())
}
// 3D
// 2x2x2 i8 tensor holding 0..7 row-major.
fn i8_tensor_2x2x2_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 2, 2];
    let data: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::new(shape.span(), data.span())
}
// 2x2x2 i8 tensor holding 0, -1, ..., -7 row-major.
fn i8_tensor_2x2x2_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![2, 2, 2];
    let data: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2x2 i8 tensor holding 0..11 row-major.
fn i8_tensor_3x2x2_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 2, 2];
    let data: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::new(shape.span(), data.span())
}
// 3x2x2 i8 tensor holding 0, -1, ..., -11 row-major.
fn i8_tensor_3x2x2_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 2, 2];
    let data: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11];
    TensorTrait::new(shape.span(), data.span())
}
// 3x3x3 i8 tensor holding 0..26, generated instead of spelled out.
fn i8_tensor_3x3x3_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<i8> = array![];
    let mut v: i8 = 0;
    while v != 27 {
        data.append(v);
        v += 1;
    };
    TensorTrait::new(shape.span(), data.span())
}
// 3x3x3 i8 tensor holding 0, -1, ..., -26, generated instead of spelled out.
fn i8_tensor_3x3x3_neg_helper() -> Tensor<i8> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<i8> = array![];
    let mut v: i8 = 0;
    while v != 27 {
        data.append(-v);
        v += 1;
    };
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
src/test_helper/tensor/u32.cairo | use orion::operators::tensor::U32Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
// 1D
// 1-D u32 tensor [0, 1, 2].
fn u32_tensor_1x3_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3];
    let data: Array<u32> = array![0, 1, 2];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 2D
// 2x2 u32 matrix [[0, 1], [2, 3]].
fn u32_tensor_2x2_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![2, 2];
    let data: Array<u32> = array![0, 1, 2, 3];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3x3 u32 matrix holding 0..8 row-major.
fn u32_tensor_3x3_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3, 3];
    let data: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3x2 u32 matrix holding 0..5 row-major.
fn u32_tensor_3x2_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3, 2];
    let data: Array<u32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3x1 u32 column vector [0, 1, 2].
fn u32_tensor_3x1_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3, 1];
    let data: Array<u32> = array![0, 1, 2];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 2x3 u32 matrix holding 0..5 row-major.
fn u32_tensor_2x3_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![2, 3];
    let data: Array<u32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3D
// 2x2x2 u32 tensor holding 0..7 row-major.
fn u32_tensor_2x2x2_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![2, 2, 2];
    let data: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3x2x2 u32 tensor holding 0..11 row-major.
fn u32_tensor_3x2x2_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3, 2, 2];
    let data: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::<u32>::new(shape.span(), data.span())
}
// 3x3x3 u32 tensor holding 0..26, generated instead of spelled out.
fn u32_tensor_3x3x3_helper() -> Tensor<u32> {
    let shape: Array<u32> = array![3, 3, 3];
    let mut data: Array<u32> = array![];
    let mut v: u32 = 0;
    while v != 27 {
        data.append(v);
        v += 1;
    };
    TensorTrait::<u32>::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
src/utils.cairo | use orion::operators::tensor::{Tensor, TensorTrait};
/// Returns the larger of two u32 values (ties return `a`).
fn u32_max(a: u32, b: u32) -> u32 {
    if b > a {
        return b;
    }
    a
}
/// Clamps `x` into `[min, max]`.
/// The lower-bound check runs first, so `min` wins if the bounds are inverted
/// (same precedence as the original implementation).
fn saturate<T, impl TCopy: Copy<T>, impl TDrop: Drop<T>, impl PartialOrdT: PartialOrd<T>>(
    min: T, max: T, x: T
) -> T {
    if x < min {
        return min;
    }
    if x > max {
        return max;
    }
    x
}
/// Panics with a uniform message unless `lhs == rhs`.
fn assert_eq<T, impl TPartialEq: PartialEq<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    lhs: T, rhs: T
) {
    let equal = lhs == rhs;
    assert(equal, 'should be equal');
}
/// Panics unless `lhs` and `rhs` have the same length and equal elements
/// at every position (compared via `assert_eq`).
fn assert_seq_eq<T, impl TPartialEq: PartialEq<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    lhs: Array<T>, rhs: Array<T>
) {
    assert(lhs.len() == rhs.len(), 'should be equal');
    let mut idx = 0;
    loop {
        if idx == lhs.len() {
            break;
        }
        assert_eq(lhs[idx], rhs[idx]);
        idx += 1;
    }
}
/// Returns row `row` of a 2-D tensor as a span over its row-major data.
/// Panics if the tensor is not 2-D; out-of-range `row` is not checked here
/// (the underlying `slice` enforces bounds).
fn get_row<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> {
    assert((*self).shape.len() == 2, 'Expected a 2D tensor');
    let width = *self.shape[1];
    (*self).data.slice(row * width, width)
}
| https://github.com/gizatechxyz/orion |
tests/lib.cairo | mod numbers;
mod performance;
mod tensor_core;
mod nodes;
mod ml;
mod operators;
| https://github.com/gizatechxyz/orion |
tests/ml.cairo | mod tree_ensemble_classifier;
mod tree_ensemble_regressor;
mod linear_regressor_test;
mod linear_classifier_test;
mod svm_regressor_test;
mod svm_classifier_test;
mod normalizer_test;
| https://github.com/gizatechxyz/orion |
tests/ml/linear_classifier_test.cairo | use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::operators::ml::linear::linear_classifier::{
LinearClassifierTrait, POST_TRANSFORM, LinearClassifier
};
use core::debug::PrintTrait;
// Multi-class classifier, no post-transform: raw decision scores.
// Expected FP16x16 magnitudes look precomputed off-chain — TODO confirm
// against the ONNX LinearClassifier reference.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_multi_none() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::NONE);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 157942, sign: false }, '*scores[0] == 2.41');
    assert(*scores.data[1] == FP16x16 { mag: 138936, sign: true }, '*scores[1] == -2.12');
    assert(*scores.data[2] == FP16x16 { mag: 38666, sign: false }, '*scores[2] == 0.59');
    assert(*scores.data[3] == FP16x16 { mag: 43910, sign: false }, '*scores[3] == 0.67');
    assert(*scores.data[4] == FP16x16 { mag: 74710, sign: true }, '*scores[4] == -1.14');
    assert(*scores.data[5] == FP16x16 { mag: 88472, sign: false }, '*scores[5] == 1.35');
    assert(*scores.data[6] == FP16x16 { mag: 70122, sign: true }, '*scores[6] == -1.07');
    assert(*scores.data[7] == FP16x16 { mag: 10484, sign: true }, '*scores[7] == -0.16');
    assert(*scores.data[8] == FP16x16 { mag: 138278, sign: false }, '*scores[8] == 2.11');
}

// Multi-class classifier with SOFTMAX: per-row scores normalize to 1.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_multi_softmax() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 55879, sign: false }, '*scores[0] == 0.852656');
    assert(*scores.data[1] == FP16x16 { mag: 602, sign: false }, '*scores[1] == 0.009192');
    assert(*scores.data[2] == FP16x16 { mag: 9053, sign: false }, '*scores[2] == 0.138152');
    assert(*scores.data[3] == FP16x16 { mag: 20888, sign: false }, '*scores[3] == 0.318722');
    assert(*scores.data[4] == FP16x16 { mag: 3418, sign: false }, '*scores[4] == 0.05216');
    assert(*scores.data[5] == FP16x16 { mag: 41229, sign: false }, '*scores[5] == 0.629118');
    assert(*scores.data[6] == FP16x16 { mag: 2380, sign: false }, '*scores[6] == 0.036323');
    assert(*scores.data[7] == FP16x16 { mag: 5914, sign: false }, '*scores[7] == 0.090237');
    assert(*scores.data[8] == FP16x16 { mag: 57241, sign: false }, '*scores[8] == 0.87344');
}

// SOFTMAXZERO with no zero scores present: expected output matches plain
// SOFTMAX exactly (same magnitudes as the test above).
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_multi_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAXZERO);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 55879, sign: false }, '*scores[0] == 0.852656');
    assert(*scores.data[1] == FP16x16 { mag: 602, sign: false }, '*scores[1] == 0.009192');
    assert(*scores.data[2] == FP16x16 { mag: 9053, sign: false }, '*scores[2] == 0.138152');
    assert(*scores.data[3] == FP16x16 { mag: 20888, sign: false }, '*scores[3] == 0.318722');
    assert(*scores.data[4] == FP16x16 { mag: 3418, sign: false }, '*scores[4] == 0.05216');
    assert(*scores.data[5] == FP16x16 { mag: 41229, sign: false }, '*scores[5] == 0.629118');
    assert(*scores.data[6] == FP16x16 { mag: 2380, sign: false }, '*scores[6] == 0.036323');
    assert(*scores.data[7] == FP16x16 { mag: 5914, sign: false }, '*scores[7] == 0.090237');
    assert(*scores.data[8] == FP16x16 { mag: 57241, sign: false }, '*scores[8] == 0.87344');
}

// Multi-class classifier with LOGISTIC (per-score sigmoid); rows need not sum to 1.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_multi_logistic() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::LOGISTIC);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 2, 'labels[1] == 2');
    assert(*labels[2] == 2, 'labels[2] == 2');
    assert(labels.len() == 3, 'len(labels) == 3');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 60135, sign: false }, '*scores[0] == 0.917587');
    assert(*scores.data[1] == FP16x16 { mag: 7023, sign: false }, '*scores[1] == 0.107168');
    assert(*scores.data[2] == FP16x16 { mag: 42163, sign: false }, '*scores[2] == 0.643365');
    assert(*scores.data[3] == FP16x16 { mag: 43351, sign: false }, '*scores[3] == 0.661503');
    assert(*scores.data[4] == FP16x16 { mag: 15881, sign: false }, '*scores[4] == 0.24232');
    assert(*scores.data[5] == FP16x16 { mag: 52043, sign: false }, '*scores[5] == 0.79413');
    assert(*scores.data[6] == FP16x16 { mag: 16738, sign: false }, '*scores[6] == 0.255403');
    assert(*scores.data[7] == FP16x16 { mag: 30152, sign: false }, '*scores[7] == 0.460085');
    assert(*scores.data[8] == FP16x16 { mag: 58450, sign: false }, '*scores[8] == 0.891871');
}
// Binary classifier, no post-transform: scores come in (negative, positive)
// pairs per sample — one column per class label {0, 1}.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_binary_none() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::NONE);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 624559, sign: true }, '*scores[0] == -9.53');
    assert(*scores.data[1] == FP16x16 { mag: 624559, sign: false }, '*scores[1] == 9.53');
    assert(*scores.data[2] == FP16x16 { mag: 435817, sign: true }, '*scores[2] == -6.65');
    assert(*scores.data[3] == FP16x16 { mag: 435817, sign: false }, '*scores[3] == 6.65');
}

// Binary classifier with LOGISTIC: each pair sums to ~1 after the sigmoid.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_binary_logistic() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::LOGISTIC);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 4, sign: false }, '*scores[0] == 7.263436e-05');
    assert(*scores.data[1] == FP16x16 { mag: 65532, sign: false }, '*scores[1] == 9.999274e-01');
    assert(*scores.data[2] == FP16x16 { mag: 84, sign: false }, '*scores[2] == 1.292350e-03');
    assert(*scores.data[3] == FP16x16 { mag: 65452, sign: false }, '*scores[3] == 9.999983e-01');
}

// Binary classifier with SOFTMAX: tiny probabilities round to mag 0 at
// FP16x16 precision.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_binary_softmax() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAX);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 0, sign: false }, '*scores[0] == 5.276517e-09');
    assert(*scores.data[1] == FP16x16 { mag: 65535, sign: false }, '*scores[1] == 1.000000');
    assert(*scores.data[2] == FP16x16 { mag: 0, sign: false }, '*scores[2] == 1.674492e-06');
    assert(*scores.data[3] == FP16x16 { mag: 65535, sign: false }, '*scores[3] == 9.999983e-01');
}

// SOFTMAXZERO on the binary model: no zero raw scores, so the expected
// values match plain SOFTMAX above.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_binary_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAXZERO);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 0, sign: false }, '*scores[0] == 5.276517e-09');
    assert(*scores.data[1] == FP16x16 { mag: 65535, sign: false }, '*scores[1] == 1.000000');
    assert(*scores.data[2] == FP16x16 { mag: 0, sign: false }, '*scores[2] == 1.674492e-06');
    assert(*scores.data[3] == FP16x16 { mag: 65535, sign: false }, '*scores[3] == 9.999983e-01');
}
// Single-class model, no post-transform: label is 1 iff the raw score is
// positive; the raw score itself is returned.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_unary_none() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::NONE);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 146146, sign: false }, '*scores[0] == 2.23');
    assert(*scores.data[1] == FP16x16 { mag: 42596, sign: true }, '*scores[1] == -0.65');
}

// Single-class model with LOGISTIC: sigmoid of the raw score; label flips
// at 0.5.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_unary_logistic() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::LOGISTIC);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 59173, sign: false }, '*scores[0] == 0.902911');
    assert(*scores.data[1] == FP16x16 { mag: 22479, sign: false }, '*scores[1] == 0.34299');
}

// Single-class model with SOFTMAX: softmax over one score is always 1,
// so every label is 1.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_unary_softmax() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAX);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 65536, sign: false }, '*scores[0] == 1');
    assert(*scores.data[1] == FP16x16 { mag: 65536, sign: false }, '*scores[1] == 1');
}

// SOFTMAXZERO on the single-class model: same degenerate result as SOFTMAX.
#[test]
#[available_gas(200000000000)]
fn test_linear_classifier_unary_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAXZERO);

    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');

    // ASSERT SCORES
    assert(*scores.data[0] == FP16x16 { mag: 65536, sign: false }, '*scores[0] == 1');
    assert(*scores.data[1] == FP16x16 { mag: 65536, sign: false }, '*scores[1] == 1');
}
// ============ HELPER ============ //
/// Builds a 3-class LinearClassifier over 2 features plus a 3x2 input batch.
/// Coefficients are stored per class (3 classes x 2 features); FP16x16
/// magnitudes are the Q16.16 encodings of the off-chain model weights —
/// presumably exported from a trained reference model (TODO confirm source).
fn linear_classifier_helper(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![0, 1, 2].span();
    let classlabels = Option::Some(classlabels);

    // 3 classes x 2 features, row-major per class; classes 0/1 use mirrored
    // signs and class 2 shares magnitudes with class 1.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
        FP16x16 { mag: 38011, sign: false },
        FP16x16 { mag: 19005, sign: false },
        FP16x16 { mag: 5898, sign: false },
    ]
        .span();

    // One intercept per class.
    let intercepts: Span<FP16x16> = array![
        FP16x16 { mag: 176947, sign: false },
        FP16x16 { mag: 176947, sign: true },
        FP16x16 { mag: 32768, sign: false },
    ]
        .span();
    let intercepts = Option::Some(intercepts);

    let multi_class: usize = 0;

    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };

    // 3x2 input: unscaled values 0..5 (each mag is k * 2^16).
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
            .span()
    );

    (classifier, X)
}
/// Builds a binary (2-label) LinearClassifier over 3 features plus a 2x3
/// input batch. A single coefficient row is supplied; the predictor derives
/// the complementary class score from it.
fn linear_classifier_helper_binary(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![0, 1].span();
    let classlabels = Option::Some(classlabels);

    // One row of 3 feature weights (negative signs).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
    ]
        .span();

    // Single intercept (unscaled value 10).
    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 655360, sign: false },].span();
    let intercepts = Option::Some(intercepts);

    let multi_class: usize = 0;

    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };

    // 2x3 input: unscaled values 0..5.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![2, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
            .span()
    );

    (classifier, X)
}
/// Builds a single-class (label 1 only) LinearClassifier over 3 features
/// plus a 2x3 input batch. Same weights as the binary helper but with a
/// smaller intercept (unscaled value 2.7).
fn linear_classifier_helper_unary(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![1].span();
    let classlabels = Option::Some(classlabels);

    // One row of 3 feature weights (negative signs).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
    ]
        .span();

    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 176947, sign: false },].span();
    let intercepts = Option::Some(intercepts);

    let multi_class: usize = 0;

    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };

    // 2x3 input: unscaled values 0..5.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![2, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
            .span()
    );

    (classifier, X)
}
| https://github.com/gizatechxyz/orion |
tests/ml/linear_regressor_test.cairo | use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor, FP16x16TensorAdd};
use orion::operators::ml::linear::linear_regressor::{
LinearRegressorTrait, POST_TRANSFORM, LinearRegressor
};
use orion::numbers::{FP16x16, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN};
// Single-target linear regressor on a 3x2 input (values 0..5): one score per
// sample, no post-transform. Expected FP16x16 values look precomputed
// off-chain — TODO confirm against a reference implementation.
#[test]
#[available_gas(200000000000)]
fn test_linear_regressor() {
    // 3x2 input: unscaled values 0..5 (each mag is k * 2^16).
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
            .span()
    );

    // Two feature weights (~0.3 and ~-0.77) and one intercept (0.5).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 19661, sign: false }, FP16x16 { mag: 50463, sign: true },
    ]
        .span();

    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 32768, sign: false },].span();
    let intercepts = Option::Some(intercepts);

    let target: usize = 1;
    let post_transform = POST_TRANSFORM::NONE;

    let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
        coefficients, intercepts, target, post_transform
    };

    let scores = LinearRegressorTrait::predict(regressor, X);

    assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27');
    assert(*scores.data[1] == FP16x16 { mag: 79299, sign: true }, '*scores[1] == -1.21');
    assert(*scores.data[2] == FP16x16 { mag: 140903, sign: true }, '*scores[2] == -2.15');
}
// Two-target linear regressor on the same 3x2 input: coefficients hold two
// rows of weights (one per target), so predict returns two interleaved
// scores per sample.
#[test]
#[available_gas(200000000000)]
fn test_linear_regressor_2() {
    // 3x2 input: unscaled values 0..5.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
            .span()
    );

    // 2 targets x 2 features; both targets share the same weight pair here.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 19661, sign: false },
        FP16x16 { mag: 50463, sign: true },
        FP16x16 { mag: 19661, sign: false },
        FP16x16 { mag: 50463, sign: true },
    ]
        .span();

    // One intercept per target (0.5 and 0.7), so the two outputs differ.
    let intercepts: Span<FP16x16> = array![
        FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 45875, sign: false },
    ]
        .span();
    let intercepts = Option::Some(intercepts);

    let target = 2;
    let post_transform = POST_TRANSFORM::NONE;

    let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
        coefficients, intercepts, target, post_transform
    };

    let scores = LinearRegressorTrait::predict(regressor, X);

    // Scores interleave as (sample0 target0, sample0 target1, sample1 target0, ...).
    assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27');
    assert(*scores.data[1] == FP16x16 { mag: 4588, sign: true }, '*scores[1] == -0.07');
    assert(*scores.data[2] == FP16x16 { mag: 79299, sign: true }, '*scores[2] == -1.21');
    assert(*scores.data[3] == FP16x16 { mag: 66192, sign: true }, '*scores[3] == -1.01');
    assert(*scores.data[4] == FP16x16 { mag: 140903, sign: true }, '*scores[4] == -2.15');
    assert(*scores.data[5] == FP16x16 { mag: 127796, sign: true }, '*scores[5] == -1.95');
}
| https://github.com/gizatechxyz/orion |
tests/ml/normalizer_test.cairo | use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::numbers::FP16x16;
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq
};
// MAX norm: each row of the 3x3 input is divided by its largest absolute
// value, so every row of the expected output contains a +/-1 entry.
// Input rows span -1..0.6 in FP16x16 (mag k*13107 ~ k*0.2).
#[test]
#[available_gas(200000000000)]
fn test_normalizer_max() {
    let mut shape = ArrayTrait::<usize>::new();
    shape.append(3);
    shape.append(3);

    let mut data = ArrayTrait::new();
    data.append(FP16x16 { mag: 65536, sign: true });
    data.append(FP16x16 { mag: 52428, sign: true });
    data.append(FP16x16 { mag: 39321, sign: true });
    data.append(FP16x16 { mag: 26214, sign: true });
    data.append(FP16x16 { mag: 13107, sign: true });
    data.append(FP16x16 { mag: 0, sign: false });
    data.append(FP16x16 { mag: 13107, sign: false });
    data.append(FP16x16 { mag: 26214, sign: false });
    data.append(FP16x16 { mag: 39321, sign: false });

    let X = TensorTrait::new(shape.span(), data.span());

    let mut shape = ArrayTrait::<usize>::new();
    shape.append(3);
    shape.append(3);

    // Row-wise division by the row's max |value|; mag 65536 == 1.0.
    let mut data = ArrayTrait::new();
    data.append(FP16x16 { mag: 65536, sign: true });
    data.append(FP16x16 { mag: 52428, sign: true });
    data.append(FP16x16 { mag: 39321, sign: true });
    data.append(FP16x16 { mag: 65536, sign: true });
    data.append(FP16x16 { mag: 32768, sign: true });
    data.append(FP16x16 { mag: 0, sign: false });
    data.append(FP16x16 { mag: 21845, sign: false });
    data.append(FP16x16 { mag: 43690, sign: false });
    data.append(FP16x16 { mag: 65536, sign: false });

    let expected_output = TensorTrait::new(shape.span(), data.span());

    let actual_output = NormalizerTrait::predict(X, NORM::MAX);

    assert_eq(actual_output, expected_output);
}
#[test]
#[available_gas(200000000000)]
fn test_normalizer_l1() {
    // Input: 3x3 FP16x16 tensor, values -1.0 .. 0.6 in 0.2 steps.
    let X = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );

    // Expected: each row divided by its L1 norm (sum of absolute values),
    // e.g. row 1: |-1|+|-0.8|+|-0.6| = 2.4, so -1/2.4 ~= -0.4167 (mag 27306).
    let expected = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 27306, sign: true },
            FP16x16 { mag: 21845, sign: true },
            FP16x16 { mag: 16384, sign: true },
            FP16x16 { mag: 43690, sign: true },
            FP16x16 { mag: 21845, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 10922, sign: false },
            FP16x16 { mag: 21845, sign: false },
            FP16x16 { mag: 32768, sign: false },
        ]
            .span()
    );

    assert_eq(NormalizerTrait::predict(X, NORM::L1), expected);
}
#[test]
#[available_gas(200000000000)]
fn test_normalizer_l2() {
    // Input: 3x3 FP16x16 tensor, values -1.0 .. 0.6 in 0.2 steps.
    let X = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );

    // Expected: each row divided by its Euclidean (L2) norm,
    // e.g. row 1 first entry ~= -0.7071 (mag 46340).
    let expected = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 46340, sign: true },
            FP16x16 { mag: 37072, sign: true },
            FP16x16 { mag: 27804, sign: true },
            FP16x16 { mag: 58617, sign: true },
            FP16x16 { mag: 29308, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 17515, sign: false },
            FP16x16 { mag: 35030, sign: false },
            FP16x16 { mag: 52545, sign: false },
        ]
            .span()
    );

    assert_eq(NormalizerTrait::predict(X, NORM::L2), expected);
}
#[test]
#[available_gas(200000000000)]
fn test_normalizer_max_avoid_div_zero() {
    // All-zero rows: MAX normalization must not divide by zero and
    // should return the input unchanged (all zeros).
    let X = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
        ]
            .span()
    );

    let expected = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 0, sign: false },
        ]
            .span()
    );

    assert_eq(NormalizerTrait::predict(X, NORM::MAX), expected);
}
| https://github.com/gizatechxyz/orion |
tests/ml/svm_classifier_test.cairo | use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::numbers::FP64x64;
use orion::operators::tensor::implementations::tensor_fp64x64::{
FP64x64Tensor, FP64x64TensorPartialEq
};
use orion::operators::ml::svm::svm_classifier::{SVMClassifierTrait, POST_TRANSFORM, SVMClassifier};
use orion::operators::ml::svm::core::{KERNEL_TYPE};
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_sv_none() {
    // Binary linear-SV classifier fixture, NONE post-transform.
    let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(POST_TRANSFORM::NONE);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels: one per input row, all class 0.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Raw (untransformed) per-class scores.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 174499, sign: true },
            FP16x16 { mag: 174499, sign: false },
            FP16x16 { mag: 145149, sign: true },
            FP16x16 { mag: 145149, sign: false },
            FP16x16 { mag: 115799, sign: true },
            FP16x16 { mag: 115799, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_sv_logistic() {
    // Binary linear-SV classifier fixture, LOGISTIC post-transform.
    let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(POST_TRANSFORM::LOGISTIC);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels: one per input row, all class 0.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after the logistic transform.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 4273, sign: false },
            FP16x16 { mag: 61262, sign: false },
            FP16x16 { mag: 6450, sign: false },
            FP16x16 { mag: 59085, sign: false },
            FP16x16 { mag: 9563, sign: false },
            FP16x16 { mag: 55972, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_sv_softmax() {
    // Binary linear-SV classifier fixture, SOFTMAX post-transform.
    let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(POST_TRANSFORM::SOFTMAX);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels: one per input row, all class 0.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after softmax (each row sums to ~1.0 in FP16x16).
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 317, sign: false },
            FP16x16 { mag: 65218, sign: false },
            FP16x16 { mag: 771, sign: false },
            FP16x16 { mag: 64764, sign: false },
            FP16x16 { mag: 1858, sign: false },
            FP16x16 { mag: 63677, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_sv_softmax_zero() {
    // Binary linear-SV classifier fixture, SOFTMAXZERO post-transform.
    // No zero scores in this fixture, so output matches plain SOFTMAX.
    let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels: one per input row, all class 0.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 317, sign: false },
            FP16x16 { mag: 65218, sign: false },
            FP16x16 { mag: 771, sign: false },
            FP16x16 { mag: 64764, sign: false },
            FP16x16 { mag: 1858, sign: false },
            FP16x16 { mag: 63677, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_none() {
    // 4-class linear classifier fixture (no probabilities), NONE post-transform.
    let (mut classifier, X) = svm_classifier_helper_noprob_linear(POST_TRANSFORM::NONE);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Raw (untransformed) per-class scores, one row of 4 per input.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 7738, sign: true },
            FP16x16 { mag: 29929, sign: true },
            FP16x16 { mag: 27248, sign: false },
            FP16x16 { mag: 21922, sign: false },
            FP16x16 { mag: 4021, sign: true },
            FP16x16 { mag: 15167, sign: true },
            FP16x16 { mag: 4843, sign: false },
            FP16x16 { mag: 5979, sign: false },
            FP16x16 { mag: 304, sign: true },
            FP16x16 { mag: 406, sign: true },
            FP16x16 { mag: 17562, sign: true },
            FP16x16 { mag: 9962, sign: true },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_logistic() {
    // 4-class linear classifier fixture (no probabilities), LOGISTIC post-transform.
    let (mut classifier, X) = svm_classifier_helper_noprob_linear(POST_TRANSFORM::LOGISTIC);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after the logistic transform.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 30835, sign: false },
            FP16x16 { mag: 25413, sign: false },
            FP16x16 { mag: 39483, sign: false },
            FP16x16 { mag: 38197, sign: false },
            FP16x16 { mag: 31762, sign: false },
            FP16x16 { mag: 28992, sign: false },
            FP16x16 { mag: 33978, sign: false },
            FP16x16 { mag: 34261, sign: false },
            FP16x16 { mag: 32691, sign: false },
            FP16x16 { mag: 32666, sign: false },
            FP16x16 { mag: 28403, sign: false },
            FP16x16 { mag: 30282, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_softmax() {
    // 4-class linear classifier fixture (no probabilities), SOFTMAX post-transform.
    let (mut classifier, X) = svm_classifier_helper_noprob_linear(POST_TRANSFORM::SOFTMAX);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after softmax (each row of 4 sums to ~1.0 in FP16x16).
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 13131, sign: false },
            FP16x16 { mag: 9359, sign: false },
            FP16x16 { mag: 22396, sign: false },
            FP16x16 { mag: 20648, sign: false },
            FP16x16 { mag: 15779, sign: false },
            FP16x16 { mag: 13311, sign: false },
            FP16x16 { mag: 18064, sign: false },
            FP16x16 { mag: 18380, sign: false },
            FP16x16 { mag: 18054, sign: false },
            FP16x16 { mag: 18026, sign: false },
            FP16x16 { mag: 13874, sign: false },
            FP16x16 { mag: 15580, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_noprob_linear_softmax_zero() {
    // 4-class linear classifier fixture (no probabilities), SOFTMAXZERO
    // post-transform. No zero scores here, so output matches plain SOFTMAX.
    let (mut classifier, X) = svm_classifier_helper_noprob_linear(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 13131, sign: false },
            FP16x16 { mag: 9359, sign: false },
            FP16x16 { mag: 22396, sign: false },
            FP16x16 { mag: 20648, sign: false },
            FP16x16 { mag: 15779, sign: false },
            FP16x16 { mag: 13311, sign: false },
            FP16x16 { mag: 18064, sign: false },
            FP16x16 { mag: 18380, sign: false },
            FP16x16 { mag: 18054, sign: false },
            FP16x16 { mag: 18026, sign: false },
            FP16x16 { mag: 13874, sign: false },
            FP16x16 { mag: 15580, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_linear_none() {
    // 4-class linear classifier fixture (with prob_a/prob_b), NONE post-transform.
    let (mut classifier, X) = svm_classifier_helper_linear(POST_TRANSFORM::NONE);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Raw (untransformed) per-class scores, one row of 4 per input.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 7738, sign: true },
            FP16x16 { mag: 29929, sign: true },
            FP16x16 { mag: 27248, sign: false },
            FP16x16 { mag: 21922, sign: false },
            FP16x16 { mag: 4021, sign: true },
            FP16x16 { mag: 15167, sign: true },
            FP16x16 { mag: 4843, sign: false },
            FP16x16 { mag: 5979, sign: false },
            FP16x16 { mag: 304, sign: true },
            FP16x16 { mag: 406, sign: true },
            FP16x16 { mag: 17562, sign: true },
            FP16x16 { mag: 9962, sign: true },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_linear_logistic() {
    // 4-class linear classifier fixture (with prob_a/prob_b), LOGISTIC post-transform.
    let (mut classifier, X) = svm_classifier_helper_linear(POST_TRANSFORM::LOGISTIC);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after the logistic transform.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 30835, sign: false },
            FP16x16 { mag: 25413, sign: false },
            FP16x16 { mag: 39483, sign: false },
            FP16x16 { mag: 38197, sign: false },
            FP16x16 { mag: 31762, sign: false },
            FP16x16 { mag: 28992, sign: false },
            FP16x16 { mag: 33978, sign: false },
            FP16x16 { mag: 34261, sign: false },
            FP16x16 { mag: 32691, sign: false },
            FP16x16 { mag: 32666, sign: false },
            FP16x16 { mag: 28403, sign: false },
            FP16x16 { mag: 30282, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_linear_softmax() {
    // 4-class linear classifier fixture (with prob_a/prob_b), SOFTMAX post-transform.
    let (mut classifier, X) = svm_classifier_helper_linear(POST_TRANSFORM::SOFTMAX);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    // Scores after softmax (each row of 4 sums to ~1.0 in FP16x16).
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 13131, sign: false },
            FP16x16 { mag: 9359, sign: false },
            FP16x16 { mag: 22396, sign: false },
            FP16x16 { mag: 20648, sign: false },
            FP16x16 { mag: 15779, sign: false },
            FP16x16 { mag: 13311, sign: false },
            FP16x16 { mag: 18064, sign: false },
            FP16x16 { mag: 18380, sign: false },
            FP16x16 { mag: 18054, sign: false },
            FP16x16 { mag: 18026, sign: false },
            FP16x16 { mag: 13874, sign: false },
            FP16x16 { mag: 15580, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_linear_softmax_zero() {
    // 4-class linear classifier fixture (with prob_a/prob_b), SOFTMAXZERO
    // post-transform. No zero scores here, so output matches plain SOFTMAX.
    let (mut classifier, X) = svm_classifier_helper_linear(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 2, 'labels[0]');
    assert(*labels[1] == 3, 'labels[1]');
    assert(*labels[2] == 0, 'labels[2]');

    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 4].span(),
        array![
            FP16x16 { mag: 13131, sign: false },
            FP16x16 { mag: 9359, sign: false },
            FP16x16 { mag: 22396, sign: false },
            FP16x16 { mag: 20648, sign: false },
            FP16x16 { mag: 15779, sign: false },
            FP16x16 { mag: 13311, sign: false },
            FP16x16 { mag: 18064, sign: false },
            FP16x16 { mag: 18380, sign: false },
            FP16x16 { mag: 18054, sign: false },
            FP16x16 { mag: 18026, sign: false },
            FP16x16 { mag: 13874, sign: false },
            FP16x16 { mag: 15580, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_binary_none_fp64x64() {
    // Binary RBF-kernel classifier fixture in FP64x64, NONE post-transform.
    let (mut classifier, X) = svm_classifier_helper_fp64x64(POST_TRANSFORM::NONE);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');

    // Per-class scores (untransformed).
    let expected: Tensor<FP64x64> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP64x64 { mag: 18322911080742739968, sign: false },
            FP64x64 { mag: 123832992966812224, sign: false },
            FP64x64 { mag: 8658920114943337472, sign: false },
            FP64x64 { mag: 9787823958766215168, sign: false },
            FP64x64 { mag: 276645820873422144, sign: false },
            FP64x64 { mag: 18170098252836128768, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_binary_logistic_fp64x64() {
    // Binary RBF-kernel classifier fixture in FP64x64, LOGISTIC post-transform.
    let (mut classifier, X) = svm_classifier_helper_fp64x64(POST_TRANSFORM::LOGISTIC);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');

    // Scores after the logistic transform.
    let expected: Tensor<FP64x64> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP64x64 { mag: 13461271680116586496, sign: false },
            FP64x64 { mag: 9254325673410459648, sign: false },
            FP64x64 { mag: 11349211717397211136, sign: false },
            FP64x64 { mag: 11614494343921229824, sign: false },
            FP64x64 { mag: 9292528880387112960, sign: false },
            FP64x64 { mag: 13431074360067923968, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_binary_softmax_fp64x64() {
    // Binary RBF-kernel classifier fixture in FP64x64, SOFTMAX post-transform.
    let (mut classifier, X) = svm_classifier_helper_fp64x64(POST_TRANSFORM::SOFTMAX);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');

    // Scores after softmax (each row sums to ~1.0 in FP64x64).
    let expected: Tensor<FP64x64> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP64x64 { mag: 13436811297474848768, sign: false },
            FP64x64 { mag: 5009932776234703872, sign: false },
            FP64x64 { mag: 8941229086247388160, sign: false },
            FP64x64 { mag: 9505514987462162432, sign: false },
            FP64x64 { mag: 5070622564237207552, sign: false },
            FP64x64 { mag: 13376121509472344064, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_classifier_binary_softmax_zero_fp64x64() {
    // Binary RBF-kernel classifier fixture in FP64x64, SOFTMAXZERO
    // post-transform. No zero scores here, so output matches plain SOFTMAX.
    let (mut classifier, X) = svm_classifier_helper_fp64x64(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);

    // Predicted labels per input row.
    assert(labels.len() == 3, 'len(labels)');
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');

    let expected: Tensor<FP64x64> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP64x64 { mag: 13436811297474848768, sign: false },
            FP64x64 { mag: 5009932776234703872, sign: false },
            FP64x64 { mag: 8941229086247388160, sign: false },
            FP64x64 { mag: 9505514987462162432, sign: false },
            FP64x64 { mag: 5070622564237207552, sign: false },
            FP64x64 { mag: 13376121509472344064, sign: false }
        ]
            .span()
    );
    assert_eq(scores, expected);
}
// ============ HELPER ============ //
// Fixture: 4-class SVM classifier with a LINEAR kernel and an empty
// support-vector set. With no support vectors the 12 coefficients presumably
// act as per-class linear weights (4 classes x 3 features) — TODO confirm
// against SVMClassifierTrait::predict. Unlike the `noprob` variant, prob_a
// and prob_b are populated. Returns the classifier and a 3x3 input tensor.
fn svm_classifier_helper_linear(
    post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
    // 12 FP16x16 weights (assumed 4 classes x 3 features — see note above).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 10169, sign: true },
        FP16x16 { mag: 15905, sign: false },
        FP16x16 { mag: 459, sign: false },
        FP16x16 { mag: 26713, sign: false },
        FP16x16 { mag: 2129, sign: true },
        FP16x16 { mag: 18, sign: false },
        FP16x16 { mag: 12830, sign: true },
        FP16x16 { mag: 23097, sign: true },
        FP16x16 { mag: 1415, sign: true },
        FP16x16 { mag: 28717, sign: true },
        FP16x16 { mag: 2994, sign: false },
        FP16x16 { mag: 847, sign: true }
    ]
        .span();
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm
    // ordering against svm core.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 65, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::LINEAR;
    // Platt-scaling probability parameters (non-empty in this fixture).
    let prob_a: Span<FP16x16> = array![FP16x16 { mag: 336797, sign: true }].span();
    let prob_b: Span<FP16x16> = array![FP16x16 { mag: 4194, sign: false }].span();
    // One rho (bias) value per class.
    let rho: Span<FP16x16> = array![
        FP16x16 { mag: 4908, sign: true },
        FP16x16 { mag: 11563, sign: true },
        FP16x16 { mag: 13872, sign: true },
        FP16x16 { mag: 33829, sign: true }
    ]
        .span();
    // No support vectors: linear-coefficient mode.
    let support_vectors: Span<FP16x16> = array![].span();
    let classlabels: Span<usize> = array![0, 1, 2, 3].span();
    let vectors_per_class = Option::None;
    let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
        classlabels,
        coefficients,
        kernel_params,
        kernel_type,
        post_transform,
        prob_a,
        prob_b,
        rho,
        support_vectors,
        vectors_per_class,
    };
    // 3x3 input: values -1.0 .. 0.6 in 0.2 steps (FP16x16).
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );
    (classifier, X)
}
// Fixture: binary SVM classifier with a LINEAR kernel, four support vectors
// (3 for class 0, 1 for class 1 per vectors_per_class) and no probability
// parameters (prob_a/prob_b empty). Returns the classifier and a 3x3 input.
fn svm_classifier_binary_noprob_linear_sv(
    post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
    // One dual coefficient per support vector.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 50226, sign: false },
        FP16x16 { mag: 5711, sign: false },
        FP16x16 { mag: 7236, sign: false },
        FP16x16 { mag: 63175, sign: true }
    ]
        .span();
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm
    // ordering against svm core.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 8025, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::LINEAR;
    // No Platt scaling: raw decision values only.
    let prob_a: Span<FP16x16> = array![].span();
    let prob_b: Span<FP16x16> = array![].span();
    let rho: Span<FP16x16> = array![FP16x16 { mag: 146479, sign: false }].span();
    // 4 support vectors x 3 features, flattened row-major.
    let support_vectors: Span<FP16x16> = array![
        FP16x16 { mag: 314572, sign: false },
        FP16x16 { mag: 222822, sign: false },
        FP16x16 { mag: 124518, sign: false },
        FP16x16 { mag: 327680, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 104857, sign: false },
        FP16x16 { mag: 294912, sign: false },
        FP16x16 { mag: 150732, sign: false },
        FP16x16 { mag: 85196, sign: false },
        FP16x16 { mag: 334233, sign: false },
        FP16x16 { mag: 163840, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let classlabels: Span<usize> = array![0, 1].span();
    // 3 support vectors belong to class 0, 1 to class 1.
    let vectors_per_class = Option::Some(array![3, 1].span());
    let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
        classlabels,
        coefficients,
        kernel_params,
        kernel_type,
        post_transform,
        prob_a,
        prob_b,
        rho,
        support_vectors,
        vectors_per_class,
    };
    // 3x3 input: values -1.0 .. 0.6 in 0.2 steps (FP16x16).
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );
    (classifier, X)
}
// Fixture: same 4-class LINEAR-kernel classifier as
// svm_classifier_helper_linear (identical coefficients and rho) but with
// prob_a/prob_b empty, i.e. no Platt probability scaling.
// Returns the classifier and a 3x3 input tensor.
fn svm_classifier_helper_noprob_linear(
    post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
    // 12 FP16x16 weights (assumed 4 classes x 3 features — no support
    // vectors, so these presumably act as linear weights; TODO confirm).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 10169, sign: true },
        FP16x16 { mag: 15905, sign: false },
        FP16x16 { mag: 459, sign: false },
        FP16x16 { mag: 26713, sign: false },
        FP16x16 { mag: 2129, sign: true },
        FP16x16 { mag: 18, sign: false },
        FP16x16 { mag: 12830, sign: true },
        FP16x16 { mag: 23097, sign: true },
        FP16x16 { mag: 1415, sign: true },
        FP16x16 { mag: 28717, sign: true },
        FP16x16 { mag: 2994, sign: false },
        FP16x16 { mag: 847, sign: true }
    ]
        .span();
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 65, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::LINEAR;
    // Empty: no probability calibration in this fixture.
    let prob_a: Span<FP16x16> = array![].span();
    let prob_b: Span<FP16x16> = array![].span();
    // One rho (bias) value per class.
    let rho: Span<FP16x16> = array![
        FP16x16 { mag: 4908, sign: true },
        FP16x16 { mag: 11563, sign: true },
        FP16x16 { mag: 13872, sign: true },
        FP16x16 { mag: 33829, sign: true }
    ]
        .span();
    // No support vectors: linear-coefficient mode.
    let support_vectors: Span<FP16x16> = array![].span();
    let classlabels: Span<usize> = array![0, 1, 2, 3].span();
    let vectors_per_class = Option::None;
    let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
        classlabels,
        coefficients,
        kernel_params,
        kernel_type,
        post_transform,
        prob_a,
        prob_b,
        rho,
        support_vectors,
        vectors_per_class,
    };
    // 3x3 input: values -1.0 .. 0.6 in 0.2 steps (FP16x16).
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );
    (classifier, X)
}
// Fixture: binary SVM classifier in FP64x64 with an RBF kernel, eight
// support vectors (4 per class) and Platt probability parameters populated.
// Dual coefficients are +1.0 for the first four vectors and -1.0 for the
// last four (18446744073709551616 == 1.0 in FP64x64).
// Returns the classifier and a 3x3 input tensor.
fn svm_classifier_helper_fp64x64(
    post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP64x64>, Tensor<FP64x64>) {
    // +/- 1.0 dual coefficient per support vector.
    let coefficients: Span<FP64x64> = array![
        FP64x64 { mag: 18446744073709551616, sign: false },
        FP64x64 { mag: 18446744073709551616, sign: false },
        FP64x64 { mag: 18446744073709551616, sign: false },
        FP64x64 { mag: 18446744073709551616, sign: false },
        FP64x64 { mag: 18446744073709551616, sign: true },
        FP64x64 { mag: 18446744073709551616, sign: true },
        FP64x64 { mag: 18446744073709551616, sign: true },
        FP64x64 { mag: 18446744073709551616, sign: true }
    ]
        .span();
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm
    // ordering against svm core.
    let kernel_params: Span<FP64x64> = array![
        FP64x64 { mag: 7054933896252620800, sign: false },
        FP64x64 { mag: 0, sign: false },
        FP64x64 { mag: 55340232221128654848, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::RBF;
    // Platt-scaling parameters (one binary decision boundary).
    let prob_a: Span<FP64x64> = array![FP64x64 { mag: 94799998099962986496, sign: true }].span();
    let prob_b: Span<FP64x64> = array![FP64x64 { mag: 1180576833385529344, sign: false }].span();
    let rho: Span<FP64x64> = array![FP64x64 { mag: 3082192501545631744, sign: false }].span();
    // 8 support vectors x 3 features, flattened row-major.
    let support_vectors: Span<FP64x64> = array![
        FP64x64 { mag: 3528081300248330240, sign: false },
        FP64x64 { mag: 19594207602596118528, sign: true },
        FP64x64 { mag: 9235613999318433792, sign: false },
        FP64x64 { mag: 10869715877100519424, sign: true },
        FP64x64 { mag: 5897111318564962304, sign: true },
        FP64x64 { mag: 1816720038917308416, sign: false },
        FP64x64 { mag: 4564890528671334400, sign: false },
        FP64x64 { mag: 21278987070814027776, sign: true },
        FP64x64 { mag: 7581529597213147136, sign: false },
        FP64x64 { mag: 10953113834067329024, sign: true },
        FP64x64 { mag: 24318984989010034688, sign: true },
        FP64x64 { mag: 30296187483321270272, sign: true },
        FP64x64 { mag: 10305112258191032320, sign: false },
        FP64x64 { mag: 17005441559857987584, sign: true },
        FP64x64 { mag: 11555205301925838848, sign: false },
        FP64x64 { mag: 2962701975885447168, sign: true },
        FP64x64 { mag: 11741665981322231808, sign: true },
        FP64x64 { mag: 15376232508819505152, sign: false },
        FP64x64 { mag: 13908474645692022784, sign: false },
        FP64x64 { mag: 7323415394302033920, sign: true },
        FP64x64 { mag: 3284258824352956416, sign: true },
        FP64x64 { mag: 11374683084831064064, sign: true },
        FP64x64 { mag: 9087138148126818304, sign: false },
        FP64x64 { mag: 8247488946750095360, sign: false }
    ]
        .span();
    let classlabels: Span<usize> = array![0, 1].span();
    // 4 support vectors per class.
    let vectors_per_class = Option::Some(array![4, 4].span());
    let mut classifier: SVMClassifier<FP64x64> = SVMClassifier {
        classlabels,
        coefficients,
        kernel_params,
        kernel_type,
        post_transform,
        prob_a,
        prob_b,
        rho,
        support_vectors,
        vectors_per_class,
    };
    // 3x3 input: values -1.0 .. 0.6 in 0.2 steps (FP64x64).
    let mut X: Tensor<FP64x64> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP64x64 { mag: 18446744073709551616, sign: true },
            FP64x64 { mag: 14757395258967642112, sign: true },
            FP64x64 { mag: 11068046444225730560, sign: true },
            FP64x64 { mag: 7378697629483821056, sign: true },
            FP64x64 { mag: 3689348814741910528, sign: true },
            FP64x64 { mag: 0, sign: false },
            FP64x64 { mag: 3689348814741910528, sign: false },
            FP64x64 { mag: 7378697629483821056, sign: false },
            FP64x64 { mag: 11068046444225730560, sign: false }
        ]
            .span()
    );
    (classifier, X)
}
| https://github.com/gizatechxyz/orion |
tests/ml/svm_regressor_test.cairo | use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::ml::svm::svm_regressor::{SVMRegressorTrait, POST_TRANSFORM, SVMRegressor};
use orion::operators::ml::svm::core::{KERNEL_TYPE};
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_linear() {
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 27812, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let (mut regressor, X) = svm_regressor_helper(KERNEL_TYPE::LINEAR, kernel_params);

    let scores = SVMRegressorTrait::predict(ref regressor, X);

    // One regression score per input row.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 30684, sign: true },
            FP16x16 { mag: 14908, sign: false },
            FP16x16 { mag: 60501, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_poly() {
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 22456, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let (mut regressor, X) = svm_regressor_helper(KERNEL_TYPE::POLY, kernel_params);

    let scores = SVMRegressorTrait::predict(ref regressor, X);

    // One regression score per input row.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 34542, sign: false },
            FP16x16 { mag: 35623, sign: false },
            FP16x16 { mag: 35815, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_rbf() {
    // Kernel parameters; presumably [gamma, coef0, degree] — TODO confirm.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 19848, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let (mut regressor, X) = svm_regressor_helper(KERNEL_TYPE::RBF, kernel_params);

    let scores = SVMRegressorTrait::predict(ref regressor, X);

    // One regression score per input row.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 19376, sign: false },
            FP16x16 { mag: 31318, sign: false },
            FP16x16 { mag: 45566, sign: false },
        ]
            .span()
    );
    assert_eq(scores, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_sigmoid() {
    // Kernel parameters for the SIGMOID kernel.
    // NOTE(review): assumed layout is [gamma, coef0, degree] — confirm
    // against the SVM kernel implementation.
    let params: Span<FP16x16> = array![
        FP16x16 { mag: 20108, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();

    let (mut regressor, X) = svm_regressor_helper(KERNEL_TYPE::SIGMOID, params);
    let actual = SVMRegressorTrait::predict(ref regressor, X);

    // Reference scores: 3 samples, 1 output each.
    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 15683, sign: false },
            FP16x16 { mag: 29421, sign: false },
            FP16x16 { mag: 43364, sign: false },
        ]
            .span()
    );

    assert_eq(actual, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_linear_one_class_0() {
    // one_class = 0 with no post-transform: the raw regression scores
    // are returned unchanged.
    let (mut regressor, X) = svm_regressor_linear_helper(POST_TRANSFORM::NONE, 0);
    let actual = SVMRegressorTrait::predict(ref regressor, X);

    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 63484, sign: false },
            FP16x16 { mag: 74218, sign: false },
            FP16x16 { mag: 84953, sign: false },
        ]
            .span()
    );

    assert_eq(actual, expected);
}
#[test]
#[available_gas(200000000000)]
fn test_svm_regressor_linear_one_class_1() {
    // one_class = 1: every sample scores exactly 1.0 (mag 65536 in FP16x16).
    let (mut regressor, X) = svm_regressor_linear_helper(POST_TRANSFORM::NONE, 1);
    let actual = SVMRegressorTrait::predict(ref regressor, X);

    let expected: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(),
        array![
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 65536, sign: false },
        ]
            .span()
    );

    assert_eq(actual, expected);
}
// ============ HELPER ============ //
/// Shared fixture for the kernel-specific SVM regressor tests.
///
/// Returns a regressor with 7 support vectors over 3 features (coefficients,
/// rho and support vectors are fixed; only the kernel type/params vary per
/// test) together with a 3x3 input tensor X.
///
/// FP16x16 is 16.16 fixed point (value = mag / 65536, negated when `sign`
/// is true), so X spans -0.5 .. 0.3 in steps of 0.1.
fn svm_regressor_helper(
    kernel_type: KERNEL_TYPE, kernel_params: Span<FP16x16>
) -> (SVMRegressor<FP16x16>, Tensor<FP16x16>) {
    // One dual coefficient per support vector (7 entries, matching n_supports).
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 54959, sign: false },
        FP16x16 { mag: 54959, sign: true },
        FP16x16 { mag: 29299, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 36236, sign: false }
    ]
        .span();
    let n_supports: usize = 7;
    let one_class: usize = 0;
    // Intercept term (rho, as in the ONNX SVMRegressor convention).
    let rho: Span<FP16x16> = array![FP16x16 { mag: 35788, sign: false }].span();
    // 21 values — presumably 7 support vectors x 3 features, row-major.
    // TODO(review): confirm layout against the predict implementation.
    let support_vectors: Span<FP16x16> = array![
        FP16x16 { mag: 8421, sign: true },
        FP16x16 { mag: 5842, sign: false },
        FP16x16 { mag: 4510, sign: false },
        FP16x16 { mag: 5202, sign: true },
        FP16x16 { mag: 14783, sign: true },
        FP16x16 { mag: 17380, sign: true },
        FP16x16 { mag: 60595, sign: false },
        FP16x16 { mag: 1674, sign: true },
        FP16x16 { mag: 38669, sign: true },
        FP16x16 { mag: 63803, sign: false },
        FP16x16 { mag: 87720, sign: true },
        FP16x16 { mag: 22236, sign: false },
        FP16x16 { mag: 61816, sign: false },
        FP16x16 { mag: 34267, sign: true },
        FP16x16 { mag: 36418, sign: false },
        FP16x16 { mag: 27471, sign: false },
        FP16x16 { mag: 28421, sign: false },
        FP16x16 { mag: 69270, sign: true },
        FP16x16 { mag: 152819, sign: false },
        FP16x16 { mag: 4065, sign: false },
        FP16x16 { mag: 62274, sign: true }
    ]
        .span();
    // Raw scores are asserted directly in the tests, so no post-transform.
    let post_transform = POST_TRANSFORM::NONE;
    let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
        coefficients,
        kernel_params,
        kernel_type,
        n_supports,
        one_class,
        post_transform,
        rho,
        support_vectors,
    };
    // 3 samples x 3 features: -0.5 .. 0.3 in 0.1 steps, row-major.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 32768, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 19660, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 6553, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 6553, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 19660, sign: false },
        ]
            .span()
    );
    (regressor, X)
}
/// Fixture for the `one_class` SVM regressor tests.
///
/// Builds a LINEAR-kernel regressor with zero support vectors; with an empty
/// support-vector set the model presumably reduces to a plain linear form
/// over `coefficients` and `rho` — TODO(review): confirm against
/// SVMRegressorTrait::predict.
///
/// FP16x16 is 16.16 fixed point (value = mag / 65536, negated when `sign`
/// is true), so X spans -1.0 .. 0.6 in steps of 0.2.
fn svm_regressor_linear_helper(
    post_transform: POST_TRANSFORM, one_class: usize
) -> (SVMRegressor<FP16x16>, Tensor<FP16x16>) {
    // One coefficient per input feature in the linear (no-support-vector) case.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 18540, sign: false },
        FP16x16 { mag: 1746, sign: true },
        FP16x16 { mag: 1097, sign: false }
    ]
        .span();
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 65, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::LINEAR;
    let n_supports: usize = 0;
    // Intercept term.
    let rho: Span<FP16x16> = array![FP16x16 { mag: 81285, sign: false }].span();
    let support_vectors: Span<FP16x16> = array![].span();
    let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
        coefficients,
        kernel_params,
        kernel_type,
        n_supports,
        one_class,
        post_transform,
        rho,
        support_vectors,
    };
    // 3 samples x 3 features: -1.0 .. 0.6 in 0.2 steps, row-major.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );
    (regressor, X)
}
| https://github.com/gizatechxyz/orion |
tests/ml/tree_ensemble_classifier.cairo | use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::operators::ml::tree_ensemble::core::{NODE_MODES, TreeEnsembleAttributes, TreeEnsemble};
use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{
TreeEnsembleClassifier, POST_TRANSFORM, TreeEnsembleClassifierTrait
};
use orion::operators::tensor::implementations::tensor_fp16x16::relative_eq;
use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
/// Multi-class classification with no post-transform: labels and raw scores.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_multi_pt_none() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::NONE);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 60075, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 0, sign: false }),
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 5461, sign: false }),
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 37329, sign: false }),
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 12528, sign: false }),
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 15677, sign: false }),
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 19853, sign: false }),
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 28257, sign: false }),
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 17424, sign: false }),
        'score[2, 2]'
    );
}
/// Multi-class classification with SOFTMAX post-transform.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_multi_pt_softmax() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX)
    ;
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 35725, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 14284, sign: false }),
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 15526, sign: false }),
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 27266, sign: false }),
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 18675, sign: false }),
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 19594, sign: false }),
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 21137, sign: false }),
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 24029, sign: false }),
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 20368, sign: false }),
        'score[2, 2]'
    );
}
/// Multi-class classification with SOFTMAXZERO post-transform
/// (zero inputs stay zero instead of receiving softmax mass).
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_multi_pt_softmax_zero() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 0, 'labels[1] == 0');
    assert(*labels[2] == 1, 'labels[2] == 1');
    assert(labels.len() == 3, 'len(labels) == 3');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 45682, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 0, sign: false }),
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 19853, sign: false }),
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 27266, sign: false }),
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 18675, sign: false }),
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 19594, sign: false }),
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 21137, sign: false }),
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 24029, sign: false }),
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 20368, sign: false }),
        'score[2, 2]'
    );
}
/// Multi-class classification with LOGISTIC (sigmoid) post-transform.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_multi_pt_logistic() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::LOGISTIC);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 0, 'labels[1] == 0');
    assert(*labels[2] == 1, 'labels[2] == 1');
    assert(labels.len() == 3, 'len(labels) == 3');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 46816, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 32768, sign: false }),
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 34132, sign: false }),
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 41856, sign: false }),
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 35890, sign: false }),
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 36668, sign: false }),
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 37693, sign: false }),
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 39724, sign: false }),
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 37098, sign: false }),
        'score[2, 2]'
    );
}
/// Binary classification with no post-transform: a single sample is
/// predicted as class 1 with all score mass (1.0) on that class.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_binary_none() {
    let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::NONE);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(labels.len() == 1, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 0, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 65536, sign: false }),
        'score[0, 1]'
    );
}
/// Binary classification with LOGISTIC post-transform.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_binary_logistic() {
    let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(
        POST_TRANSFORM::LOGISTIC
    );
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(labels.len() == 1, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 17625, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 47910, sign: false }),
        'score[0, 1]'
    );
}
/// Binary classification with SOFTMAX post-transform.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_binary_softmax() {
    let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::SOFTMAX);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(labels.len() == 1, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 7812, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 57723, sign: false }),
        'score[0, 1]'
    );
}
/// Binary classification with SOFTMAXZERO post-transform; with no zero
/// scores in this fixture the expected values match the plain SOFTMAX case.
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_classifier_binary_softmax_zero() {
    let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(
        POST_TRANSFORM::SOFTMAXZERO
    );
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // ASSERT LABELS
    assert(*labels[0] == 1, 'labels[0]');
    assert(labels.len() == 1, 'len(labels)');

    // ASSERT SCORES
    // `relative_eq` returns a bool, so it is asserted directly
    // (the redundant `== true` comparisons were removed).
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 7812, sign: false }),
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 57723, sign: false }),
        'score[0, 1]'
    );
}
// #[test]
// #[available_gas(200000000000)]
// fn test_tree_ensemble_classifier_binary_probit() {
// let (mut classifier, X) = tree_ensemble_classifier_binary_class_helper(POST_TRANSFORM::PROBIT);
// let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);
// // ASSERT LABELS
// assert(*labels[0] == 1, 'labels[0]');
// assert(labels.len() == 1, 'len(labels)');
// // ASSERT SCORES
// assert(
// relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 0, sign: false }) == true,
// 'score[0, 0]'
// );
// assert(
// relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 65536, sign: false }) == true,
// 'score[0, 1]'
// );
// }
// ============ HELPER ============ //
/// Shared fixture for the multi-class tree-ensemble classifier tests.
///
/// Builds a 2-tree ensemble (5 nodes per tree, flattened into parallel
/// per-node attribute arrays) over 3 class labels, plus a 3x3 input tensor.
/// FP16x16 is 16.16 fixed point (value = mag / 65536, negated when `sign`
/// is true), so X spans -1.0 .. 0.6 in steps of 0.2.
fn tree_ensemble_classifier_helper(
    post_transform: POST_TRANSFORM
) -> (TreeEnsembleClassifier<FP16x16>, Tensor<FP16x16>) {
    // Leaf -> (class, weight) mapping: entry i says that node
    // class_nodeids[i] of tree class_treeids[i] contributes
    // class_weights[i] to class class_ids[i].
    let class_ids: Span<usize> = array![0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]
        .span();
    let class_nodeids: Span<usize> = array![2, 2, 2, 3, 3, 3, 4, 4, 4, 1, 1, 1, 3, 3, 3, 4, 4, 4]
        .span();
    let class_treeids: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        .span();
    let class_weights: Span<FP16x16> = array![
        FP16x16 { mag: 30583, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 2185, sign: false },
        FP16x16 { mag: 13107, sign: false },
        FP16x16 { mag: 15729, sign: false },
        FP16x16 { mag: 3932, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 32768, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 32768, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 29491, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 3277, sign: false },
        FP16x16 { mag: 6746, sign: false },
        FP16x16 { mag: 12529, sign: false },
        FP16x16 { mag: 13493, sign: false },
    ]
        .span();
    let classlabels: Span<usize> = array![0, 1, 2].span();
    // Per-node attributes, flattened across both trees (nodes 0-4 belong to
    // tree 0, nodes 5-9 to tree 1). Child ids of 0 on LEAF nodes are unused.
    let nodes_falsenodeids: Span<usize> = array![4, 3, 0, 0, 0, 2, 0, 4, 0, 0].span();
    let nodes_featureids: Span<usize> = array![1, 0, 0, 0, 0, 1, 0, 0, 0, 0].span();
    let nodes_missing_value_tracks_true: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span();
    let nodes_modes: Span<NODE_MODES> = array![
        NODE_MODES::BRANCH_LEQ,
        NODE_MODES::BRANCH_LEQ,
        NODE_MODES::LEAF,
        NODE_MODES::LEAF,
        NODE_MODES::LEAF,
        NODE_MODES::BRANCH_LEQ,
        NODE_MODES::LEAF,
        NODE_MODES::BRANCH_LEQ,
        NODE_MODES::LEAF,
        NODE_MODES::LEAF,
    ]
        .span();
    let nodes_nodeids: Span<usize> = array![0, 1, 2, 3, 4, 0, 1, 2, 3, 4].span();
    let nodes_treeids: Span<usize> = array![0, 0, 0, 0, 0, 1, 1, 1, 1, 1].span();
    let nodes_truenodeids: Span<usize> = array![1, 2, 0, 0, 0, 1, 0, 3, 0, 0].span();
    // Split thresholds for BRANCH_LEQ nodes; 0 entries correspond to leaves.
    let nodes_values: Span<FP16x16> = array![
        FP16x16 { mag: 81892, sign: false },
        FP16x16 { mag: 19992, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 110300, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 44245, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
    ]
        .span();
    let tree_ids: Span<usize> = array![0, 1].span();
    // Root of tree 0 sits at flat index 0, root of tree 1 at flat index 5.
    let mut root_index: Felt252Dict<usize> = Default::default();
    root_index.insert(0, 0);
    root_index.insert(1, 5);
    // Maps a felt252 key to a node's flat position in the attribute arrays.
    // NOTE(review): keys appear to be hashes of (tree_id, node_id) — confirm
    // against the TreeEnsemble lookup implementation.
    let mut node_index: Felt252Dict<usize> = Default::default();
    node_index
        .insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0);
    node_index
        .insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1);
    node_index
        .insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2);
    node_index
        .insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3);
    node_index
        .insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4);
    node_index
        .insert(1089549915800264549621536909767699778745926517555586332772759280702396009108, 5);
    node_index
        .insert(1321142004022994845681377299801403567378503530250467610343381590909832171180, 6);
    node_index
        .insert(2592987851775965742543459319508348457290966253241455514226127639100457844774, 7);
    node_index
        .insert(2492755623019086109032247218615964389726368532160653497039005814484393419348, 8);
    node_index
        .insert(1323616023845704258113538348000047149470450086307731200728039607710316625916, 9);
    let atts = TreeEnsembleAttributes {
        nodes_falsenodeids,
        nodes_featureids,
        nodes_missing_value_tracks_true,
        nodes_modes,
        nodes_nodeids,
        nodes_treeids,
        nodes_truenodeids,
        nodes_values
    };
    let mut ensemble: TreeEnsemble<FP16x16> = TreeEnsemble {
        atts, tree_ids, root_index, node_index
    };
    // No per-class base scores.
    let base_values: Option<Span<FP16x16>> = Option::None;
    let mut classifier: TreeEnsembleClassifier<FP16x16> = TreeEnsembleClassifier {
        ensemble,
        class_ids,
        class_nodeids,
        class_treeids,
        class_weights,
        classlabels,
        base_values,
        post_transform
    };
    // 3 samples x 3 features: -1.0 .. 0.6 in 0.2 steps, row-major.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52429, sign: true },
            FP16x16 { mag: 39322, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39322, sign: false },
        ]
            .span()
    );
    (classifier, X)
}
// ============ BINARY CLASS HELPER ============ //
fn tree_ensemble_classifier_binary_class_helper(
post_transform: POST_TRANSFORM
) -> (TreeEnsembleClassifier<FP16x16>, Tensor<FP16x16>) {
let class_ids: Span<usize> = array![
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
]
.span();
let class_nodeids: Span<usize> = array![
4,
5,
7,
10,
12,
13,
15,
17,
19,
20,
24,
26,
29,
31,
32,
33,
37,
38,
39,
40,
46,
49,
50,
52,
56,
57,
58,
59,
62,
64,
66,
67,
68,
73,
74,
75,
76,
81,
82,
83,
84,
88,
89,
91,
93,
94,
95,
98,
99,
101,
104,
106,
107,
108,
112,
113,
114,
115,
119,
121,
124,
125,
127,
128,
130,
131,
138,
140,
141,
142,
143,
148,
149,
150,
151,
152,
153,
154
]
.span();
let class_treeids: Span<usize> = array![
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
]
.span();
let class_weights: Span<FP16x16> = array![
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 43690, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false }
]
.span();
let classlabels: Span<usize> = array![0, 1].span();
let nodes_falsenodeids: Span<usize> = array![
116,
21,
6,
5,
0,
0,
8,
0,
14,
11,
0,
13,
0,
0,
16,
0,
18,
0,
20,
0,
0,
41,
34,
25,
0,
27,
0,
33,
30,
0,
32,
0,
0,
0,
40,
39,
38,
0,
0,
0,
0,
109,
96,
69,
60,
47,
0,
51,
50,
0,
0,
53,
0,
59,
58,
57,
0,
0,
0,
0,
68,
63,
0,
65,
0,
67,
0,
0,
0,
77,
76,
75,
74,
0,
0,
0,
0,
85,
84,
83,
82,
0,
0,
0,
0,
95,
90,
89,
0,
0,
92,
0,
94,
0,
0,
0,
100,
99,
0,
0,
102,
0,
108,
105,
0,
107,
0,
0,
0,
115,
114,
113,
0,
0,
0,
0,
132,
129,
120,
0,
122,
0,
126,
125,
0,
0,
128,
0,
0,
131,
0,
0,
154,
153,
144,
143,
142,
139,
0,
141,
0,
0,
0,
0,
152,
151,
150,
149,
0,
0,
0,
0,
0,
0,
0
]
.span();
let nodes_featureids: Span<usize> = array![
3,
2,
4,
8,
0,
0,
1,
0,
2,
7,
0,
0,
0,
0,
7,
0,
0,
0,
6,
0,
0,
8,
0,
2,
0,
7,
0,
7,
2,
0,
2,
0,
0,
0,
2,
6,
7,
0,
0,
0,
0,
7,
7,
0,
7,
1,
0,
0,
2,
0,
0,
2,
0,
2,
2,
6,
0,
0,
0,
0,
2,
0,
0,
1,
0,
6,
0,
0,
0,
0,
2,
6,
7,
0,
0,
0,
0,
6,
7,
2,
0,
0,
0,
0,
0,
2,
2,
7,
0,
0,
2,
0,
0,
0,
0,
0,
6,
1,
0,
0,
4,
0,
2,
2,
0,
0,
0,
0,
0,
0,
1,
2,
0,
0,
0,
0,
6,
0,
7,
0,
0,
0,
1,
3,
0,
0,
2,
0,
0,
8,
0,
0,
2,
2,
2,
4,
7,
3,
0,
1,
0,
0,
0,
0,
4,
3,
7,
8,
0,
0,
0,
0,
0,
0,
0
]
.span();
let nodes_missing_value_tracks_true: Span<usize> = array![
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
]
.span();
let nodes_modes: Span<NODE_MODES> = array![
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF
]
.span();
let nodes_nodeids: Span<usize> = array![
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110,
111,
112,
113,
114,
115,
116,
117,
118,
119,
120,
121,
122,
123,
124,
125,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
142,
143,
144,
145,
146,
147,
148,
149,
150,
151,
152,
153,
154
]
.span();
let nodes_treeids: Span<usize> = array![
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
]
.span();
let nodes_truenodeids: Span<usize> = array![
1,
2,
3,
4,
0,
0,
7,
0,
9,
10,
0,
12,
0,
0,
15,
0,
17,
0,
19,
0,
0,
22,
23,
24,
0,
26,
0,
28,
29,
0,
31,
0,
0,
0,
35,
36,
37,
0,
0,
0,
0,
42,
43,
44,
45,
46,
0,
48,
49,
0,
0,
52,
0,
54,
55,
56,
0,
0,
0,
0,
61,
62,
0,
64,
0,
66,
0,
0,
0,
70,
71,
72,
73,
0,
0,
0,
0,
78,
79,
80,
81,
0,
0,
0,
0,
86,
87,
88,
0,
0,
91,
0,
93,
0,
0,
0,
97,
98,
0,
0,
101,
0,
103,
104,
0,
106,
0,
0,
0,
110,
111,
112,
0,
0,
0,
0,
117,
118,
119,
0,
121,
0,
123,
124,
0,
0,
127,
0,
0,
130,
0,
0,
133,
134,
135,
136,
137,
138,
0,
140,
0,
0,
0,
0,
145,
146,
147,
148,
0,
0,
0,
0,
0,
0,
0
]
.span();
let nodes_values: Span<FP16x16> = array![
FP16x16 { mag: 4096, sign: false },
FP16x16 { mag: 22937, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 16384, sign: false },
FP16x16 { mag: 57344, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 19660, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 29491, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 24576, sign: false },
FP16x16 { mag: 42598, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 62259, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 62259, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 40960, sign: false },
FP16x16 { mag: 24576, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 19660, sign: false },
FP16x16 { mag: 45875, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 29491, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 42598, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 36044, sign: false },
FP16x16 { mag: 19660, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 45875, sign: false },
FP16x16 { mag: 29491, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 36044, sign: false },
FP16x16 { mag: 58982, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 58982, sign: false },
FP16x16 { mag: 29491, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 45875, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 58982, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 42598, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 45875, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 29491, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 45875, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 36044, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 58982, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 36044, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 16384, sign: false },
FP16x16 { mag: 20480, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 49152, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 8192, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false }
]
.span();
let base_values: Option<Span<FP16x16>> = Option::None;
let tree_ids: Span<usize> = array![0].span();
let mut root_index: Felt252Dict<usize> = Default::default();
root_index.insert(0, 0);
let mut node_index: Felt252Dict<usize> = Default::default();
node_index
.insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0);
node_index
.insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1);
node_index
.insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2);
node_index
.insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3);
node_index
.insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4);
node_index
.insert(3344223123784052057366048933846905716067140384361791026153972616805110454637, 5);
node_index
.insert(658476905110174425295568215706634733332002869979287079110965040248935650599, 6);
node_index
.insert(2836212335642438363012490794290757623813171043187182819737087983331902926990, 7);
node_index
.insert(3496601277869056110810900082189273917786762659443522403285387602989271154262, 8);
node_index
.insert(1249294489531540970169611621067106471309281870082955806338234725206665112557, 9);
node_index
.insert(2161697998033672097816961828039488190903838124365465380011173778905747857792, 10);
node_index
.insert(1129815197211541481934112806673325772687763881719835256646064516195041515616, 11);
node_index
.insert(2592593088135949192377729543480191336537305484235681164569491942155715064163, 12);
node_index
.insert(578223957014284909949571568465953382377214912750427143720957054706073492593, 13);
node_index
.insert(1645617302026197421098102802983206579163506957138012501615708926120228167528, 14);
node_index
.insert(2809438816810155970395166036110536928593305127049404137239671320081144123490, 15);
node_index
.insert(2496308528011391755709310159103918074725328650411689040761791240500618770096, 16);
node_index
.insert(2003594778587446957576114348312422277631766150749194167061999666337236425714, 17);
node_index
.insert(2215681478480673835576618830034726157921200517935329010004363713426342305479, 18);
node_index
.insert(3185925835074464079989752015681272863271067691852543168049845807561733691707, 19);
node_index
.insert(1207265836470221457484062512091666004839070622130697586496866096347024057755, 20);
node_index
.insert(1870230949202979679764944800468118671928852128047695497376875566624821494262, 21);
node_index
.insert(618060852536781954395603948693216564334274573299243914053414488061601327758, 22);
node_index
.insert(232760707548494477255512699093366059519467428168757247456690480397246371463, 23);
node_index
.insert(1617386247965480308136742715422077429967341022950306068917456849194882895900, 24);
node_index
.insert(654822874782506608656472905579051041410086644071534146326024101025575400153, 25);
node_index
.insert(525638101901638132526332140778087078272370083489998903571807698910013602668, 26);
node_index
.insert(3091640181556387972179279087539287892670640556085669903494551919685982442095, 27);
node_index
.insert(1425411460578159050163131982087304445715005458700346341117759372943452688022, 28);
node_index
.insert(1722933265299553894839124723076027659619615015638971980461286818493531809034, 29);
node_index
.insert(3325117385742592388671007840076299062858228097051060057749225651290693960897, 30);
node_index
.insert(1869273998012404873272699831805499731567895666937555882116307079956228100456, 31);
node_index
.insert(257262395234910825879033951801423835835630270967846664413154594520703929530, 32);
node_index
.insert(2891500475385583315757684141371327604925143655360011721762142660942782195029, 33);
node_index
.insert(1257459981124043271342269816753070228024611695909553991758648317372015085782, 34);
node_index
.insert(3573101724490615587655146760489247477770015274618159524231872921394794809579, 35);
node_index
.insert(2951401777594449283985541406642940553317465718696638438535370997641527993378, 36);
node_index
.insert(2436860863451320452900512817385686838091627966322316039332239784330434600829, 37);
node_index
.insert(3257977356974702770994741663931928753019715185508521958836925918758890988390, 38);
node_index
.insert(2741853283805093821434776875305720302351684616683152528499335618682018880592, 39);
node_index
.insert(514567459251558911686762246500770717674979116530125263461114578537254680672, 40);
node_index
.insert(2119374930171040799805795099091470687208894498354655018353474015395489390434, 41);
node_index
.insert(3338470191188327918255138125570464269857839379813971679216902484398948556964, 42);
node_index
.insert(2892272281879752543368066497063301979597320550780387266511926397533716561161, 43);
node_index
.insert(2855312300216814846973137837923466865382642814675378398541743368270404441020, 44);
node_index
.insert(3483159989811162048659069774034779954374540681397531094699912464364012442948, 45);
node_index
.insert(2987290998320166766043911843685118029159841654368226419198314196237253901671, 46);
node_index
.insert(2925128850088180758852255336587985612621894021863350117875677692518888637440, 47);
node_index
.insert(2816470536741550741568042622139415760794090671576940833850781679568928363263, 48);
node_index
.insert(117504025904364990582663097556885493352655695615775952177872159762046032741, 49);
node_index
.insert(2143228410294149239354901612797540167003066966910132278060626241695943498248, 50);
node_index
.insert(419311759585766455354017006957403420381614228026953716552023555428752798694, 51);
node_index
.insert(3050064038480880151202753004776919876287903442365303272956696507808448797287, 52);
node_index
.insert(1385347512411195789080079656286641766866442255046855963092069449745407366357, 53);
node_index
.insert(3070310993421490198115289431281422702215620142859327949152517372324361472619, 54);
node_index
.insert(2913742884576958969164113782587195202828846527657900496424141449477472273564, 55);
node_index
.insert(2093568472535973986606438755824580633177115509557931302974988564932601955239, 56);
node_index
.insert(3560543329106347446823281318204312198881533222464682017397248462954529220234, 57);
node_index
.insert(2258329791422139736262782239641765930569031761627249090322755566443202104242, 58);
node_index
.insert(780147230530856456622774510057100334628735431063744145772648079601317149643, 59);
node_index
.insert(2316329094783634722527635915976455864728431870713378530935487247638854220445, 60);
node_index
.insert(595942459003356191117553450912822964169058193996898486073017533717706655996, 61);
node_index
.insert(468061318535033931711585815055033307297228787991312757359512916260570188285, 62);
node_index
.insert(2052204235688624923559873131063770183910134013049526186717275231865702195614, 63);
node_index
.insert(1699955311620840869165542755053722387608345658646185648087789689690825797785, 64);
node_index
.insert(3374282522812564185678772854203408947562394461702303390331208821006329361123, 65);
node_index
.insert(2973169188135795465401576355486514117723575153845438471619715618155257254587, 66);
node_index
.insert(1933845760462748501896196912926633344425020928596291295340561855718789280752, 67);
node_index
.insert(1400206374308839959676708676217334569580738052049798766556848516900888958934, 68);
node_index
.insert(1440488595273849761788031183901254714714513692476890759699232177835922420051, 69);
node_index
.insert(1765607197782429306903827944694032984087223086461400721152786273443512274576, 70);
node_index
.insert(1081728107764482028110815183657783965582618309560569428049406599883158895762, 71);
node_index
.insert(2062101824085365476835789898002802715794623271831111740147610520210138854237, 72);
node_index
.insert(2074740322618091900768870458741540994849904300182495465356314088191301853065, 73);
node_index
.insert(3258451235037745323160669027918885172565773098482160366154412360890640013860, 74);
node_index
.insert(525053653813541387331907730505904505067816165493211829943994988775279102044, 75);
node_index
.insert(1899573658331441767985549642643113663505618738939032010935036740376062596854, 76);
node_index
.insert(350484224543766923071449868701665032398970313961410080649918872017849315812, 77);
node_index
.insert(1950842492180490337143378914485176805944281696420768035114335939818602766139, 78);
node_index
.insert(1404824782481446239312837894341789608778585592445990662138109764117920511709, 79);
node_index
.insert(362836422984951199752185473435750713386745407518736982952373985921347236081, 80);
node_index
.insert(946623025367211063265176586824604502073515634531788667777364911179858705558, 81);
node_index
.insert(2633163324000277496191816132521100721217797223993064604664039067710591734562, 82);
node_index
.insert(1801986104078933931671502775029170829560335045042499367678597186639133610708, 83);
node_index
.insert(1420697278439090953165809531316265389371075037014378922361911811337560296928, 84);
node_index
.insert(2818913779862691152404893285048164649343019708946413114150419613972391643833, 85);
node_index
.insert(2117995436013652728497840885480545729833030913486848118093758726746902541269, 86);
node_index
.insert(127751852951361188238686395231851222850913859197429858579312845246901369178, 87);
node_index
.insert(2698811633001158191033663638617437313508153976714307643233173949778419312517, 88);
node_index
.insert(658388282521842455588914251287531837029259203197178137902217792556456503561, 89);
node_index
.insert(1181527093320872098458354979612125149419384756607076935731557552577945926179, 90);
node_index
.insert(749436134732178646256740138670151907037714564259781780243747781475007506978, 91);
node_index
.insert(139527053159256821789882596124320673637475746672994443968014105962305658551, 92);
node_index
.insert(2256264752321707533173578319742847366660740117899562657584919346001438808295, 93);
node_index
.insert(1471349294215639651865069312281269029496180149092207674923855978537861742949, 94);
node_index
.insert(1599527610774916650758786135513735847459194869088601099692148267264507139422, 95);
node_index
.insert(1348925567371118538973078195838174941892601233016661969987842843098656775084, 96);
node_index
.insert(3255130909854220350850821724488067913492420563978595271106701962634473840914, 97);
node_index
.insert(1098499015810170842401428216621470177488952811780672364884710297364076372943, 98);
node_index
.insert(2666902303639302012507119689908308317608522901613536135678723310999647515155, 99);
node_index
.insert(907997515879651052705985194221621380802961721264372722705825219340461809200, 100);
node_index
.insert(2124360554325144308113106422635485756539471211141315552843423768396084888273, 101);
node_index
.insert(3598736440043009208771817410113758019876931018927260161846683440123219507147, 102);
node_index
.insert(1237113034722832488580561245188430373504295256910735188987019984096012001931, 103);
node_index
.insert(884558344049768836371555446021588200903052780339208951904957349404044037185, 104);
node_index
.insert(784280321344489256066716285882203121428790637989919760379274813665427427262, 105);
node_index
.insert(3472551952588748711709398308465335743810517871695257916614928877311914574241, 106);
node_index
.insert(1579363348100943961344032004617708767155021524242506190674861550786419896732, 107);
node_index
.insert(653576968777651719072715499492112313607520878545254037043893560183879857489, 108);
node_index
.insert(2633327961579170199842757290989312779085828750765842327985383652720803061926, 109);
node_index
.insert(3101204920253220343970782457572784926765600523633379722044614528209389590915, 110);
node_index
.insert(2537565394330405662800880050062241097694806466900452037378113841155978555645, 111);
node_index
.insert(306955559655552244989220345789093187601563118591829582730637833945761653350, 112);
node_index
.insert(1144065212212058748489308207801098564095305699242880891977316839573431241916, 113);
node_index
.insert(3478181491851418723342103101321490659650934149094649769124337426850038155270, 114);
node_index
.insert(3419621624676637660673415219086314486713019053519954317586073983685881930356, 115);
node_index
.insert(2426908011370291613447136873176769136554489197972200481728552402228021778402, 116);
node_index
.insert(1916122042123370178944690083048900704842269230325086549679099089416174875473, 117);
node_index
.insert(2057207652658215393591191155928140567561900227203223756539551876829334137660, 118);
node_index
.insert(2722034389703601317070746005702467061064354401688341549606678773616189196490, 119);
node_index
.insert(1171026027377763359814377926117880688616494219551682642535759838199732407496, 120);
node_index
.insert(3507234282031533800397666430789917374211847440333243952151005899337152633413, 121);
node_index
.insert(591003147462937848375161803108517142253138969543815135207326321181858185919, 122);
node_index
.insert(182069734527202013451813026473135702900640769187641767871411473365447302169, 123);
node_index
.insert(1195243682249232878341146428166676460720423167409013083888435705219134747702, 124);
node_index
.insert(1793425644853312386902998134061844248823841892125424765064687913085130719534, 125);
node_index
.insert(1983622665815164792580256365519803214027269990384198703315493315153573288434, 126);
node_index
.insert(3615973154491344159350153395208055142342062736505558158666764642048838175685, 127);
node_index
.insert(2751715913626909804252433699602081411293721754810298670422380863932998088133, 128);
node_index
.insert(186918881712189523740089713555196200069231794627360499557319265374750577226, 129);
node_index
.insert(696585542544434929491503209053317581175146475161262066468664234437983008675, 130);
node_index
.insert(4359830495913805154545225899592517767672472055784183911796827820518038513, 131);
node_index
.insert(2954335207058000607751727656601539819316106074875304820535376873121805433820, 132);
node_index
.insert(2510390039949230255082316953804013731253145558531652907601250263563528226672, 133);
node_index
.insert(3226995230854300551967642178527450300960499043510855212238369890580256668532, 134);
node_index
.insert(1620924075233065517364532267959798304439946408626316544761884056227131075831, 135);
node_index
.insert(1610900122192929153657761847202689179268074338802437933866337242354758101660, 136);
node_index
.insert(2565949095169598991903537465065584077778440646580025930326495506484329892725, 137);
node_index
.insert(1012362975819634411571869839734809106575285344002573666983595104659295812607, 138);
node_index
.insert(242312010918799555845832460483650516749990744287009628468613253461264531026, 139);
node_index
.insert(1104776796569046483584574115975216172161469015460244982207905888870418040487, 140);
node_index
.insert(3289555912992777681578950209252840071327866822704829766247386311885634446673, 141);
node_index
.insert(3133389957643610781371406448279843175887428913359743769920083259111437722268, 142);
node_index
.insert(1169918710119352022244140656086831769713178729571654411898266328562003734517, 143);
node_index
.insert(3592039235252149652556167686570045881877115549259769455422056097903987237819, 144);
node_index
.insert(2048175709145840597887667330964815895803568760936075562647625937161113445908, 145);
node_index
.insert(602222645962845554276438041138511866776339653340605661136009451417275008940, 146);
node_index
.insert(3318742320906017551291978242369663702298606650330380959683585594592748661010, 147);
node_index
.insert(564160996724923690963741657975239836484028160385417016805513722318839327322, 148);
node_index
.insert(656294390376267384135628810815504467149264887388377312825033341338166573620, 149);
node_index
.insert(1201592236750942207412694706123654466634588634474700675083122904145559965915, 150);
node_index
.insert(2141408926815137181004274624388915700231991905288681935478972043994347966006, 151);
node_index
.insert(1440847977042239464860406726605567303568767649154338464116083965986084755262, 152);
node_index
.insert(950585553138591375958592507876257987416844837045084288783892644487908218679, 153);
node_index
.insert(257643451533833048856069434258149588745628261389615631070776723485957908127, 154);
let atts = TreeEnsembleAttributes {
nodes_falsenodeids,
nodes_featureids,
nodes_missing_value_tracks_true,
nodes_modes,
nodes_nodeids,
nodes_treeids,
nodes_truenodeids,
nodes_values
};
let mut ensemble: TreeEnsemble<FP16x16> = TreeEnsemble {
atts, tree_ids, root_index, node_index
};
let mut classifier: TreeEnsembleClassifier<FP16x16> = TreeEnsembleClassifier {
ensemble,
class_ids,
class_nodeids,
class_treeids,
class_weights,
classlabels,
base_values,
post_transform
};
let mut X = TensorTrait::new(
array![1, 9].span(),
array![
FP16x16 { mag: 39321, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 52428, sign: false },
FP16x16 { mag: 16384, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 65536, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 16384, sign: false },
FP16x16 { mag: 0, sign: false },
]
.span()
);
(classifier, X)
}
| https://github.com/gizatechxyz/orion |
tests/ml/tree_ensemble_regressor.cairo | use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::operators::ml::tree_ensemble::core::{NODE_MODES, TreeEnsembleAttributes, TreeEnsemble};
use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{
TreeEnsembleRegressor, POST_TRANSFORM, TreeEnsembleRegressorTrait, AGGREGATE_FUNCTION
};
use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
use orion::operators::tensor::implementations::tensor_fp16x16::relative_eq;
use core::debug::PrintTrait;
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_regressor_SUM() {
    // SUM aggregation: the two trees' leaf weights are added per target
    // (presumably per ONNX TreeEnsembleRegressor semantics — see core impl).
    let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM);
    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);

    // Expected mag 37809 = 5041 + 32768, the two per-tree leaf weights from the
    // helper's target_weights table; same leaves fire for all three input rows.
    // `relative_eq` already returns bool — the redundant `== true` is dropped.
    assert(
        relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 37809, sign: false }),
        'res[0, 0] = 37809'
    );
    assert(
        relative_eq(@res.get(1, 0).unwrap(), @FP16x16 { mag: 37809, sign: false }),
        'res[1, 0] = 37809'
    );
    assert(
        relative_eq(@res.get(2, 0).unwrap(), @FP16x16 { mag: 37809, sign: false }),
        'res[2, 0] = 37809'
    );
}
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_regressor_AVERAGE() {
    // AVERAGE aggregation: the mean of the two trees' leaf weights per target.
    let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::AVERAGE);
    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);

    // Expected mag 18904 = (5041 + 32768) / 2 (integer division of the SUM
    // case's 37809), identical for all three input rows.
    // `relative_eq` already returns bool — the redundant `== true` is dropped.
    assert(
        relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 18904, sign: false }),
        'res[0, 0] = 18904'
    );
    assert(
        relative_eq(@res.get(1, 0).unwrap(), @FP16x16 { mag: 18904, sign: false }),
        'res[1, 0] = 18904'
    );
    assert(
        relative_eq(@res.get(2, 0).unwrap(), @FP16x16 { mag: 18904, sign: false }),
        'res[2, 0] = 18904'
    );
}
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_regressor_MIN() {
    // MIN aggregation: the smaller of the two trees' leaf weights per target.
    let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MIN);
    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);

    // Expected mag 5041 = min(5041, 32768), the smaller per-tree leaf weight
    // from the helper's target_weights table, for all three input rows.
    // `relative_eq` already returns bool — the redundant `== true` is dropped.
    assert(
        relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 5041, sign: false }),
        'res[0, 0] = 5041'
    );
    assert(
        relative_eq(@res.get(1, 0).unwrap(), @FP16x16 { mag: 5041, sign: false }),
        'res[1, 0] = 5041'
    );
    assert(
        relative_eq(@res.get(2, 0).unwrap(), @FP16x16 { mag: 5041, sign: false }),
        'res[2, 0] = 5041'
    );
}
#[test]
#[available_gas(200000000000)]
fn test_tree_ensemble_regressor_MAX() {
    // MAX aggregation: the larger of the two trees' leaf weights per target.
    let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::MAX);
    let mut res = TreeEnsembleRegressorTrait::predict(regressor, X);

    // Expected mag 32768 = max(5041, 32768), the larger per-tree leaf weight
    // from the helper's target_weights table, for all three input rows.
    // `relative_eq` already returns bool — the redundant `== true` is dropped.
    assert(
        relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 32768, sign: false }),
        'res[0, 0] = 32768'
    );
    assert(
        relative_eq(@res.get(1, 0).unwrap(), @FP16x16 { mag: 32768, sign: false }),
        'res[1, 0] = 32768'
    );
    assert(
        relative_eq(@res.get(2, 0).unwrap(), @FP16x16 { mag: 32768, sign: false }),
        'res[2, 0] = 32768'
    );
}
// ============ HELPER ============ //
// Builds a fixed two-tree, single-target FP16x16 TreeEnsembleRegressor fixture
// plus a 3x3 input tensor; the aggregation function is the only parameter so
// the tests above can exercise SUM / AVERAGE / MIN / MAX on identical data.
fn tree_ensemble_regressor_helper(
agg: AGGREGATE_FUNCTION
) -> (TreeEnsembleRegressor<FP16x16>, Tensor<FP16x16>) {
let n_targets: usize = 1;
let aggregate_function = agg;
// Flattened node tables, 5 nodes per tree: per nodes_treeids below, entries
// 0..5 belong to tree 0 and entries 5..10 to tree 1. Leaf rows carry 0 as a
// placeholder child id / feature id / threshold.
let nodes_falsenodeids: Span<usize> = array![4, 3, 0, 0, 0, 2, 0, 4, 0, 0].span();
let nodes_featureids: Span<usize> = array![0, 2, 0, 0, 0, 0, 0, 2, 0, 0].span();
let nodes_missing_value_tracks_true: Span<usize> = array![0, 0, 0, 0, 0, 0, 0, 0, 0, 0].span();
let nodes_modes: Span<NODE_MODES> = array![
NODE_MODES::BRANCH_LEQ,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::BRANCH_LEQ,
NODE_MODES::LEAF,
NODE_MODES::LEAF
]
.span();
let nodes_nodeids: Span<usize> = array![0, 1, 2, 3, 4, 0, 1, 2, 3, 4].span();
let nodes_treeids: Span<usize> = array![0, 0, 0, 0, 0, 1, 1, 1, 1, 1].span();
let nodes_truenodeids: Span<usize> = array![1, 2, 0, 0, 0, 1, 0, 3, 0, 0].span();
// Branch thresholds in FP16x16 (mag / 2^16): e.g. 17462 ~= 0.2664,
// 47240 (sign: true) ~= -0.7208. Leaf rows are zero placeholders.
let nodes_values: Span<FP16x16> = array![
FP16x16 { mag: 17462, sign: false },
FP16x16 { mag: 40726, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 47240, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 36652, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 0, sign: false }
]
.span();
// Leaf -> target mapping: three leaves per tree, all feeding target 0.
let target_ids: Span<usize> = array![0, 0, 0, 0, 0, 0].span();
let target_nodeids: Span<usize> = array![2, 3, 4, 1, 3, 4].span();
let target_treeids: Span<usize> = array![0, 0, 0, 1, 1, 1].span()
;
let target_weights: Span<FP16x16> = array![
FP16x16 { mag: 5041, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 32768, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 18724, sign: false },
FP16x16 { mag: 32768, sign: false }
]
.span();
let base_values: Option<Span<FP16x16>> = Option::None;
let post_transform = POST_TRANSFORM::NONE;
let tree_ids: Span<usize> = array![0, 1].span();
// Root offsets into the flat node tables: tree 0 starts at index 0,
// tree 1 at index 5.
let mut root_index: Felt252Dict<usize> = Default::default();
root_index.insert(0, 0);
root_index.insert(1, 5);
// Maps precomputed felt252 keys (presumably a hash of (tree_id, node_id) —
// confirm against the TreeEnsemble core impl) to flat node positions 0..10.
let mut node_index: Felt252Dict<usize> = Default::default();
node_index
.insert(2089986280348253421170679821480865132823066470938446095505822317253594081284, 0);
node_index
.insert(2001140082530619239661729809084578298299223810202097622761632384561112390979, 1);
node_index
.insert(2592670241084192212354027440049085852792506518781954896144296316131790403900, 2);
node_index
.insert(2960591271376829378356567803618548672034867345123727178628869426548453833420, 3);
node_index
.insert(458933264452572171106695256465341160654132084710250671055261382009315664425, 4);
node_index
.insert(1089549915800264549621536909767699778745926517555586332772759280702396009108, 5);
node_index
.insert(1321142004022994845681377299801403567378503530250467610343381590909832171180, 6);
node_index
.insert(2592987851775965742543459319508348457290966253241455514226127639100457844774, 7);
node_index
.insert(2492755623019086109032247218615964389726368532160653497039005814484393419348, 8);
node_index
.insert(1323616023845704258113538348000047149470450086307731200728039607710316625916, 9);
let atts = TreeEnsembleAttributes {
nodes_falsenodeids,
nodes_featureids,
nodes_missing_value_tracks_true,
nodes_modes,
nodes_nodeids,
nodes_treeids,
nodes_truenodeids,
nodes_values
};
let mut ensemble: TreeEnsemble<FP16x16> = TreeEnsemble {
atts, tree_ids, root_index, node_index
};
let mut regressor: TreeEnsembleRegressor<FP16x16> = TreeEnsembleRegressor {
ensemble,
target_ids,
target_nodeids,
target_treeids,
target_weights,
base_values,
n_targets,
aggregate_function,
post_transform
};
// 3x3 input: values -0.5, -0.4, ... 0.3 in FP16x16 (mag steps of 6553/6554).
let mut X: Tensor<FP16x16> = TensorTrait::new(
array![3, 3].span(),
array![
FP16x16 { mag: 32768, sign: true },
FP16x16 { mag: 26214, sign: true },
FP16x16 { mag: 19660, sign: true },
FP16x16 { mag: 13107, sign: true },
FP16x16 { mag: 6553, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 6553, sign: false },
FP16x16 { mag: 13107, sign: false },
FP16x16 { mag: 19660, sign: false },
]
.span()
);
(regressor, X)
}
| https://github.com/gizatechxyz/orion |
tests/nodes.cairo | mod abs_fp16x16;
mod abs_fp8x23;
mod abs_i32;
mod abs_i8;
mod acos_fp16x16;
mod acos_fp8x23;
mod acosh_fp16x16;
mod acosh_fp8x23;
mod add_fp16x16;
mod add_fp16x16_broadcast;
mod add_fp8x23;
mod add_fp8x23_broadcast;
mod add_i32;
mod add_i32_broadcast;
mod add_i8;
mod add_i8_broadcast;
mod add_u32;
mod add_u32_broadcast;
mod argmin_fp16x16_1D_default;
mod argmin_fp16x16_1D_keepdims_false;
mod argmin_fp16x16_1D_last_index;
mod argmin_fp16x16_2D_default;
mod argmin_fp16x16_2D_keepdims_false;
mod argmin_fp16x16_2D_last_index;
mod argmin_fp16x16_3D_default;
mod argmin_fp16x16_3D_keepdims_false;
mod argmin_fp16x16_3D_last_index;
mod argmin_fp8x23_1D_default;
mod argmin_fp8x23_1D_keepdims_false;
mod argmin_fp8x23_1D_last_index;
mod argmin_fp8x23_2D_default;
mod argmin_fp8x23_2D_keepdims_false;
mod argmin_fp8x23_2D_last_index;
mod argmin_fp8x23_3D_default;
mod argmin_fp8x23_3D_keepdims_false;
mod argmin_fp8x23_3D_last_index;
mod argmin_i32_1D_default;
mod argmin_i32_1D_keepdims_false;
mod argmin_i32_1D_last_index;
mod argmin_i32_2D_default;
mod argmin_i32_2D_keepdims_false;
mod argmin_i32_2D_last_index;
mod argmin_i32_3D_default;
mod argmin_i32_3D_keepdims_false;
mod argmin_i32_3D_last_index;
mod argmin_i8_1D_default;
mod argmin_i8_1D_keepdims_false;
mod argmin_i8_1D_last_index;
mod argmin_i8_2D_default;
mod argmin_i8_2D_keepdims_false;
mod argmin_i8_2D_last_index;
mod argmin_i8_3D_default;
mod argmin_i8_3D_keepdims_false;
mod argmin_i8_3D_last_index;
mod argmin_u32_1D_default;
mod argmin_u32_1D_keepdims_false;
mod argmin_u32_1D_last_index;
mod argmin_u32_2D_default;
mod argmin_u32_2D_keepdims_false;
mod argmin_u32_2D_last_index;
mod argmin_u32_3D_default;
mod argmin_u32_3D_keepdims_false;
mod argmin_u32_3D_last_index;
mod asin_fp16x16;
mod asin_fp8x23;
mod asinh_fp16x16;
mod asinh_fp8x23;
mod atan_fp16x16;
mod atan_fp8x23;
mod ceil_fp16x16;
mod ceil_fp8x23;
mod concat_fp16x16_1d;
mod concat_fp16x16_2d;
mod concat_fp16x16_3d_default;
mod concat_fp16x16_3d_axis_1;
mod concat_fp16x16_3d_axis_2;
mod concat_fp16x16_3d_three_tensors_axis_1;
mod concat_fp16x16_3d_three_tensors_axis_2;
mod concat_fp8x23_1d;
mod concat_fp8x23_2d;
mod concat_fp8x23_3d_default;
mod concat_fp8x23_3d_axis_1;
mod concat_fp8x23_3d_axis_2;
mod concat_fp8x23_3d_three_tensors_axis_1;
mod concat_fp8x23_3d_three_tensors_axis_2;
mod concat_i32_1d;
mod concat_i32_2d;
mod concat_i32_3d_default;
mod concat_i32_3d_axis_1;
mod concat_i32_3d_axis_2;
mod concat_i32_3d_three_tensors_axis_1;
mod concat_i32_3d_three_tensors_axis_2;
mod concat_i8_1d;
mod concat_i8_2d;
mod concat_i8_3d_default;
mod concat_i8_3d_axis_1;
mod concat_i8_3d_axis_2;
mod concat_i8_3d_three_tensors_axis_1;
mod concat_i8_3d_three_tensors_axis_2;
mod concat_u32_1d;
mod concat_u32_2d;
mod concat_u32_3d_default;
mod concat_u32_3d_axis_1;
mod concat_u32_3d_axis_2;
mod concat_u32_3d_three_tensors_axis_1;
mod concat_u32_3d_three_tensors_axis_2;
mod cos_fp16x16;
mod cos_fp8x23;
mod cosh_fp16x16;
mod cosh_fp8x23;
mod cumsum_fp16x16_1d_default;
mod cumsum_fp16x16_1d_exclusive;
mod cumsum_fp16x16_1d_reverse;
mod cumsum_fp16x16_1d_reverse_exclusive;
mod cumsum_fp16x16_2d_axis_0;
mod cumsum_fp16x16_2d_axis_1;
mod cumsum_fp8x23_1d_default;
mod cumsum_fp8x23_1d_exclusive;
mod cumsum_fp8x23_1d_reverse;
mod cumsum_fp8x23_1d_reverse_exclusive;
mod cumsum_fp8x23_2d_axis_0;
mod cumsum_fp8x23_2d_axis_1;
mod cumsum_i32_1d_default;
mod cumsum_i32_1d_exclusive;
mod cumsum_i32_1d_reverse;
mod cumsum_i32_1d_reverse_exclusive;
mod cumsum_i32_2d_axis_0;
mod cumsum_i32_2d_axis_1;
mod cumsum_i8_1d_default;
mod cumsum_i8_1d_exclusive;
mod cumsum_i8_1d_reverse;
mod cumsum_i8_1d_reverse_exclusive;
mod cumsum_i8_2d_axis_0;
mod cumsum_i8_2d_axis_1;
mod cumsum_u32_1d_default;
mod cumsum_u32_1d_exclusive;
mod cumsum_u32_1d_reverse;
mod cumsum_u32_1d_reverse_exclusive;
mod cumsum_u32_2d_axis_0;
mod cumsum_u32_2d_axis_1;
mod div_fp16x16;
mod div_fp16x16_broadcast;
mod div_fp8x23;
mod div_fp8x23_broadcast;
mod div_i32;
mod div_i32_broadcast;
mod div_i8;
mod div_i8_broadcast;
mod div_u32;
mod div_u32_broadcast;
mod equal_fp16x16;
mod equal_fp16x16_broadcast;
mod equal_fp8x23;
mod equal_fp8x23_broadcast;
mod equal_i32;
mod equal_i32_broadcast;
mod equal_i8;
mod equal_i8_broadcast;
mod equal_u32;
mod equal_u32_broadcast;
mod exp_fp16x16;
mod exp_fp8x23;
mod less_equal_fp16x16;
mod less_equal_fp16x16_broadcast;
mod less_equal_fp8x23;
mod less_equal_fp8x23_broadcast;
mod less_equal_i32;
mod less_equal_i32_broadcast;
mod less_equal_i8;
mod less_equal_i8_broadcast;
mod less_equal_u32;
mod less_equal_u32_broadcast;
mod greater_fp16x16;
mod greater_fp16x16_broadcast;
mod greater_fp8x23;
mod greater_fp8x23_broadcast;
mod greater_i32;
mod greater_i32_broadcast;
mod greater_i8;
mod greater_i8_broadcast;
mod greater_u32;
mod greater_u32_broadcast;
mod leaky_relu_fp16x16;
mod leaky_relu_fp8x23;
mod linear_fp16x16;
mod linear_fp8x23;
mod linear_i32;
mod linear_i8;
mod linear_u32;
mod log_fp16x16;
mod log_fp8x23;
mod logsoftmax_fp16x16_axis_0;
mod logsoftmax_fp16x16_axis_1;
mod logsoftmax_fp8x23_axis_0;
mod logsoftmax_fp8x23_axis_1;
mod matmul_fp16x16_1d;
mod matmul_fp16x16_2x2;
mod matmul_fp16x16_2x1;
mod matmul_fp16x16_1x2;
mod matmul_fp8x23_1d;
mod matmul_fp8x23_2x2;
mod matmul_fp8x23_2x1;
mod matmul_fp8x23_1x2;
mod matmul_i32_1d;
mod matmul_i32_2x2;
mod matmul_i32_2x1;
mod matmul_i32_1x2;
mod matmul_i8_1d;
mod matmul_i8_2x2;
mod matmul_i8_2x1;
mod matmul_i8_1x2;
mod matmul_u32_1d;
mod matmul_u32_2x2;
mod matmul_u32_2x1;
mod matmul_u32_1x2;
mod mul_fp16x16;
mod mul_fp16x16_broadcast;
mod mul_fp8x23;
mod mul_fp8x23_broadcast;
mod mul_i32;
mod mul_i32_broadcast;
mod mul_i8;
mod mul_i8_broadcast;
mod mul_u32;
mod mul_u32_broadcast;
mod or_fp16x16;
mod or_fp16x16_broadcast;
mod or_fp8x23;
mod or_fp8x23_broadcast;
mod or_i32;
mod or_i32_broadcast;
mod or_i8;
mod or_i8_broadcast;
mod or_u32;
mod or_u32_broadcast;
mod relu_fp16x16;
mod relu_fp8x23;
mod relu_i32;
mod relu_i8;
mod sigmoid_fp16x16;
mod sigmoid_fp8x23;
mod sin_fp16x16;
mod sin_fp8x23;
mod sinh_fp16x16;
mod sinh_fp8x23;
mod softplus_fp8x23;
mod softplus_fp16x16;
mod softsign_fp8x23;
mod softsign_fp16x16;
mod sqrt_fp16x16;
mod sqrt_fp8x23;
mod sub_fp16x16;
mod sub_fp16x16_broadcast;
mod sub_fp8x23;
mod sub_fp8x23_broadcast;
mod sub_i32;
mod sub_i32_broadcast;
mod sub_i8;
mod sub_i8_broadcast;
mod sub_u32;
mod sub_u32_broadcast;
mod tanh_fp16x16;
mod tanh_fp8x23;
mod transpose_fp16x16_2d;
mod transpose_fp16x16_3d;
mod transpose_fp8x23_2d;
mod transpose_fp8x23_3d;
mod transpose_i32_2d;
mod transpose_i32_3d;
mod transpose_i8_2d;
mod transpose_i8_3d;
mod transpose_u32_2d;
mod transpose_u32_3d;
mod xor_fp16x16;
mod xor_fp16x16_broadcast;
mod xor_fp8x23;
mod xor_fp8x23_broadcast;
mod xor_i32;
mod xor_i32_broadcast;
mod xor_i8;
mod xor_i8_broadcast;
mod xor_u32;
mod xor_u32_broadcast;
mod greater_equal_fp16x16;
mod greater_equal_fp16x16_broadcast;
mod greater_equal_fp8x23;
mod greater_equal_fp8x23_broadcast;
mod greater_equal_i32;
mod greater_equal_i32_broadcast;
mod greater_equal_i8;
mod greater_equal_i8_broadcast;
mod greater_equal_u32;
mod greater_equal_u32_broadcast;
mod slice_fp16x16_2d;
mod slice_fp16x16_3d;
mod slice_fp8x23_2d;
mod slice_fp8x23_3d;
mod slice_i32_2d;
mod slice_i32_3d;
mod slice_i8_2d;
mod slice_i8_3d;
mod slice_u32_2d;
mod slice_u32_3d;
mod nonzero_fp16x16_2d;
mod nonzero_fp16x16_3d;
mod nonzero_fp8x23_2d;
mod nonzero_fp8x23_3d;
mod nonzero_i32_2d;
mod nonzero_i32_3d;
mod nonzero_i8_2d;
mod nonzero_i8_3d;
mod nonzero_u32_2d;
mod nonzero_u32_3d;
mod squeeze_fP16x16;
mod squeeze_fP8x23;
mod squeeze_i32;
mod squeeze_i8;
mod squeeze_u32;
mod unsqueeze_fp16x16_2d;
mod unsqueeze_fp16x16_3d;
mod unsqueeze_fp8x23_2d;
mod unsqueeze_fp8x23_3d;
mod unsqueeze_i32_2d;
mod unsqueeze_i32_3d;
mod unsqueeze_i8_2d;
mod unsqueeze_i8_3d;
mod unsqueeze_u32_2d;
mod unsqueeze_u32_3d;
mod sign_fP16x16;
mod sign_fP8x23;
mod sign_fail;
mod sign_i32;
mod sign_i8;
mod clip_fp16x16_2d;
mod clip_fp16x16_3d;
mod clip_fp8x23_2d;
mod clip_fp8x23_3d;
mod clip_i32_2d;
mod clip_i32_3d;
mod clip_i8_2d;
mod clip_i8_3d;
mod clip_u32_2d;
mod clip_u32_3d;
mod identity_fP16x16;
mod identity_fP8x23;
mod identity_i32;
mod identity_i8;
mod identity_u32;
mod thresholded_relu_fp16x16;
mod thresholded_relu_fp8x23;
mod hard_sigmoid_fp8x23;
mod hard_sigmoid_fp16x16;
mod neg_fp16x16;
mod neg_fp8x23;
mod neg_i32;
mod neg_i8;
mod gemm_all_attributes;
mod gemm_alpha;
mod gemm_beta;
mod gemm_default_matrix_bias;
mod gemm_default_vector_bias;
mod gemm_default_no_bias;
mod gemm_transposeA;
mod gemm_transposeB;
mod min_fp16x16_three_tensors;
mod min_fp16x16_broadcast_three_tensors;
mod min_fp16x16_two_tensors;
mod min_fp16x16_broadcast_two_tensors;
mod min_fp8x23_three_tensors;
mod min_fp8x23_broadcast_three_tensors;
mod min_fp8x23_two_tensors;
mod min_fp8x23_broadcast_two_tensors;
mod min_i32_three_tensors;
mod min_i32_broadcast_three_tensors;
mod min_i32_two_tensors;
mod min_i32_broadcast_two_tensors;
mod min_i8_three_tensors;
mod min_i8_broadcast_three_tensors;
mod min_i8_two_tensors;
mod min_i8_broadcast_two_tensors;
mod min_u32_three_tensors;
mod min_u32_broadcast_three_tensors;
mod min_u32_two_tensors;
mod min_u32_broadcast_two_tensors;
mod where_fp16x16;
mod where_fp16x16_broadcast;
mod where_fp8x23;
mod where_fp8x23_broadcast;
mod where_i32;
mod where_i32_broadcast;
mod where_i8;
mod where_i8_broadcast;
mod where_u32;
mod where_u32_broadcast;
mod not_bool;
mod round_fp16x16;
mod round_fp8x23;
mod max_fp16x16_three_tensors;
mod max_fp16x16_broadcast_three_tensors;
mod max_fp16x16_two_tensors;
mod max_fp16x16_broadcast_two_tensors;
mod max_fp8x23_three_tensors;
mod max_fp8x23_broadcast_three_tensors;
mod max_fp8x23_two_tensors;
mod max_fp8x23_broadcast_two_tensors;
mod max_i32_three_tensors;
mod max_i32_broadcast_three_tensors;
mod max_i32_two_tensors;
mod max_i32_broadcast_two_tensors;
mod max_i8_three_tensors;
mod max_i8_broadcast_three_tensors;
mod max_i8_two_tensors;
mod max_i8_broadcast_two_tensors;
mod max_u32_three_tensors;
mod max_u32_broadcast_three_tensors;
mod max_u32_two_tensors;
mod max_u32_broadcast_two_tensors;
mod scatter_fp16x16_3d_default;
mod scatter_fp16x16_3d_axis1;
mod scatter_fp16x16_3d_axis1_add;
mod scatter_fp8x23_default;
mod scatter_fp8x23_axis1;
mod scatter_fp8x23_mul;
mod scatter_i8_default;
mod scatter_i8_axis1;
mod scatter_i8_axis1_max;
mod scatter_u32_default;
mod scatter_u32_axis1;
mod scatter_u32_add;
mod array_feature_extractor_1D_i32;
mod array_feature_extractor_1D_fp8x23;
mod array_feature_extractor_1D_fp16x16;
mod array_feature_extractor_2D_i32;
mod array_feature_extractor_2D_fp8x23;
mod array_feature_extractor_2D_fp16x16;
mod array_feature_extractor_3D_i32;
mod array_feature_extractor_3D_fp8x23;
mod array_feature_extractor_3D_fp16x16;
mod binarizer_fp16x16;
mod binarizer_fp8x23;
mod tril_fp16x16;
mod tril_fp16x16_neg;
mod tril_fp16x16_one_row;
mod tril_fp16x16_out_neg;
mod tril_fp16x16_out_pos;
mod tril_fp16x16_pos;
mod tril_fp16x16_square;
mod tril_fp16x16_square_neg;
mod tril_fp16x16_zero;
mod triu_fp16x16;
mod triu_fp16x16_neg;
mod triu_fp16x16_one_row;
mod triu_fp16x16_out_neg;
mod triu_fp16x16_out_pos;
mod triu_fp16x16_pos;
mod triu_fp16x16_square;
mod triu_fp16x16_square_neg;
mod triu_fp16x16_zero;
mod tril_fp8x23;
mod tril_fp8x23_neg;
mod tril_fp8x23_one_row;
mod tril_fp8x23_out_neg;
mod tril_fp8x23_out_pos;
mod tril_fp8x23_pos;
mod tril_fp8x23_square;
mod tril_fp8x23_square_neg;
mod tril_fp8x23_zero;
mod triu_fp8x23;
mod triu_fp8x23_neg;
mod triu_fp8x23_one_row;
mod triu_fp8x23_out_neg;
mod triu_fp8x23_out_pos;
mod triu_fp8x23_pos;
mod triu_fp8x23_square;
mod triu_fp8x23_square_neg;
mod triu_fp8x23_zero;
mod tril_i32;
mod tril_neg_i32;
mod tril_i32_one_row;
mod tril_i32_out_neg;
mod tril_i32_out_pos;
mod tril_i32_pos;
mod tril_i32_square;
mod tril_i32_square_neg;
mod tril_i32_zero;
mod triu_i32;
mod triu_i32_neg;
mod triu_i32_one_row;
mod triu_i32_out_neg;
mod triu_i32_out_pos;
mod triu_i32_pos;
mod triu_i32_square;
mod triu_i32_square_neg;
mod triu_i32_zero;
mod tril_i8;
mod tril_i8_neg;
mod tril_i8_one_row;
mod tril_i8_out_neg;
mod tril_i8_out_pos;
mod tril_i8_pos;
mod tril_i8_square;
mod tril_i8_square_neg;
mod tril_i8_zero;
mod triu_i8;
mod triu_i8_neg;
mod triu_i8_one_row;
mod triu_i8_out_neg;
mod triu_i8_out_pos;
mod triu_i8_pos;
mod triu_i8_square;
mod triu_i8_square_neg;
mod triu_i8_zero;
mod tril_u32;
mod tril_u32_neg;
mod tril_u32_one_row;
mod tril_u32_out_neg;
mod tril_u32_out_pos;
mod tril_u32_pos;
mod tril_u32_square;
mod tril_u32_square_neg;
mod tril_u32_zero;
mod triu_u32;
mod triu_u32_neg;
mod triu_u32_one_row;
mod triu_u32_out_neg;
mod triu_u32_out_pos;
mod triu_u32_pos;
mod triu_u32_square;
mod triu_u32_square_neg;
mod triu_u32_zero;
mod reduce_sum_square_fp16x16_export_do_not_keepdims;
mod reduce_sum_square_fp16x16_export_keepdims;
mod reduce_sum_square_fp16x16_export_negative_axes_keepdims;
mod reduce_sum_square_fp8x23_export_do_not_keepdims;
mod reduce_sum_square_fp8x23_export_keepdims;
mod reduce_sum_square_fp8x23_export_negative_axes_keepdims;
mod reduce_sum_square_i32_export_do_not_keepdims;
mod reduce_sum_square_i32_export_keepdims;
mod reduce_sum_square_i32_export_negative_axes_keepdims;
mod reduce_sum_square_i8_export_do_not_keepdims;
mod reduce_sum_square_i8_export_keepdims;
mod reduce_sum_square_i8_export_negative_axes_keepdims;
mod reduce_sum_square_u32_export_do_not_keepdims;
mod reduce_sum_square_u32_export_keepdims;
mod reduce_sum_square_u32_export_negative_axes_keepdims;
mod reduce_l2_fp16x16_export_do_not_keepdims;
mod reduce_l2_fp16x16_export_keepdims;
mod reduce_l2_fp16x16_export_negative_axes_keepdims;
mod reduce_l2_fp8x23_export_do_not_keepdims;
mod reduce_l2_fp8x23_export_keepdims;
mod reduce_l2_fp8x23_export_negative_axes_keepdims;
mod reduce_l1_fp16x16_export_do_not_keepdims;
mod reduce_l1_fp16x16_export_keepdims;
mod reduce_l1_fp16x16_export_negative_axes_keepdims;
mod reduce_l1_fp8x23_export_do_not_keepdims;
mod reduce_l1_fp8x23_export_keepdims;
mod reduce_l1_fp8x23_export_negative_axes_keepdims;
mod reduce_l1_i32_export_do_not_keepdims;
mod reduce_l1_i32_export_keepdims;
mod reduce_l1_i32_export_negative_axes_keepdims;
mod reduce_l1_i8_export_do_not_keepdims;
mod reduce_l1_i8_export_keepdims;
mod reduce_l1_i8_export_negative_axes_keepdims;
mod reduce_l1_u32_export_do_not_keepdims;
mod reduce_l1_u32_export_keepdims;
mod reduce_l1_u32_export_negative_axes_keepdims;
mod reduce_prod_fp16x16_1D;
mod reduce_prod_fp16x16_2D_default;
mod reduce_prod_fp16x16_2D_keepdims;
mod reduce_prod_fp16x16_2D_axis_1;
mod reduce_prod_fp8x23_1D;
mod reduce_prod_fp8x23_2D_default;
mod reduce_prod_fp8x23_2D_keepdims;
mod reduce_prod_fp8x23_2D_axis_1;
mod reduce_prod_i32_1D;
mod reduce_prod_i32_2D_default;
mod reduce_prod_i32_2D_keepdims;
mod reduce_prod_i32_2D_axis_1;
mod reduce_prod_i8_1D;
mod reduce_prod_i8_2D_default;
mod reduce_prod_i8_2D_keepdims;
mod reduce_prod_i8_2D_axis_1;
mod reduce_prod_u32_1D;
mod reduce_prod_u32_2D_default;
mod reduce_prod_u32_2D_keepdims;
mod reduce_prod_u32_2D_axis_1;
mod sequence_length_fp16x16;
mod sequence_length_fp16x16_broadcast;
mod sequence_length_fp8x23;
mod sequence_length_fp8x23_broadcast;
mod sequence_length_i32;
mod sequence_length_i32_broadcast;
mod sequence_length_i8;
mod sequence_length_i8_broadcast;
mod sequence_length_u32;
mod sequence_length_u32_broadcast;
mod sequence_at_u32_positive;
mod sequence_at_u32_negative;
mod sequence_at_fp16x16_positive;
mod sequence_at_fp16x16_negative;
mod sequence_at_fp8x23_positive;
mod sequence_at_fp8x23_negative;
mod sequence_at_i32_positive;
mod sequence_at_i32_negative;
mod sequence_at_i8_positive;
mod sequence_at_i8_negative;
mod reduce_min_fp16x16_1D;
mod reduce_min_fp16x16_2D_default;
mod reduce_min_fp16x16_2D_keepdims;
mod reduce_min_fp16x16_2D_axis_1;
mod reduce_min_fp8x23_1D;
mod reduce_min_fp8x23_2D_default;
mod reduce_min_fp8x23_2D_keepdims;
mod reduce_min_fp8x23_2D_axis_1;
mod reduce_min_i32_1D;
mod reduce_min_i32_2D_default;
mod reduce_min_i32_2D_keepdims;
mod reduce_min_i32_2D_axis_1;
mod reduce_min_i8_1D;
mod reduce_min_i8_2D_default;
mod reduce_min_i8_2D_keepdims;
mod reduce_min_i8_2D_axis_1;
mod reduce_min_u32_1D;
mod reduce_min_u32_2D_default;
mod reduce_min_u32_2D_keepdims;
mod reduce_min_u32_2D_axis_1;
mod sequence_construct_fp16x16;
mod sequence_construct_fp8x23;
mod sequence_construct_i32;
mod sequence_construct_i8;
mod sequence_construct_u32;
mod shrink_hard_fp16x16;
mod shrink_soft_fp16x16;
mod shrink_hard_fp8x23;
mod shrink_soft_fp8x23;
mod sequence_empty_fp16x16;
mod sequence_empty_fp8x23;
mod sequence_empty_i32;
mod sequence_empty_i8;
mod sequence_empty_u32;
mod reduce_mean_fp16x16_1D;
mod reduce_mean_fp16x16_2D_default;
mod reduce_mean_fp16x16_2D_keepdims;
mod reduce_mean_fp16x16_2D_axis_1;
mod reduce_mean_fp8x23_1D;
mod reduce_mean_fp8x23_2D_default;
mod reduce_mean_fp8x23_2D_keepdims;
mod reduce_mean_fp8x23_2D_axis_1;
mod reduce_mean_i32_1D;
mod reduce_mean_i32_2D_default;
mod reduce_mean_i32_2D_keepdims;
mod reduce_mean_i32_2D_axis_1;
mod reduce_mean_i8_1D;
mod reduce_mean_i8_2D_default;
mod reduce_mean_i8_2D_keepdims;
mod reduce_mean_i8_2D_axis_1;
mod reduce_mean_u32_1D;
mod reduce_mean_u32_2D_default;
mod reduce_mean_u32_2D_keepdims;
mod reduce_mean_u32_2D_axis_1;
mod pow_fp16x16;
mod pow_fp16x16_broadcast;
mod pow_fp8x23;
mod pow_fp8x23_broadcast;
mod sequence_erase_u32_positive;
mod sequence_erase_u32_negative;
mod sequence_erase_u32_empty;
mod sequence_erase_fp16x16_positive;
mod sequence_erase_fp16x16_negative;
mod sequence_erase_fp16x16_empty;
mod sequence_erase_fp8x23_positive;
mod sequence_erase_fp8x23_negative;
mod sequence_erase_fp8x23_empty;
mod sequence_erase_i32_positive;
mod sequence_erase_i32_negative;
mod sequence_erase_i32_empty;
mod sequence_erase_i8_positive;
mod sequence_erase_i8_negative;
mod sequence_erase_i8_empty;
mod sequence_insert_fp16x16;
mod sequence_insert_fp8x23;
mod sequence_insert_i32;
mod sequence_insert_i8;
mod sequence_insert_u32;
mod concat_from_sequence_fp8x23_new_axis_zero;
mod concat_from_sequence_fp8x23_new_axis_one;
mod concat_from_sequence_fp8x23_new_axis_default;
mod concat_from_sequence_fp16x16_new_axis_zero;
mod concat_from_sequence_fp16x16_new_axis_one;
mod concat_from_sequence_fp16x16_new_axis_default;
mod concat_from_sequence_i32_new_axis_zero;
mod concat_from_sequence_i32_new_axis_one;
mod concat_from_sequence_i32_new_axis_default;
mod concat_from_sequence_i8_new_axis_zero;
mod concat_from_sequence_i8_new_axis_one;
mod concat_from_sequence_i8_new_axis_default;
mod concat_from_sequence_u32_new_axis_zero;
mod concat_from_sequence_u32_new_axis_one;
mod concat_from_sequence_u32_new_axis_default;
mod is_nan_fp16x16;
mod is_nan_fp8x23;
mod is_inf_fp16x16;
mod is_inf_fp8x23;
mod is_inf_i32;
mod is_inf_i8;
mod is_inf_u32;
mod is_pos_inf_fp16x16;
mod is_neg_inf_fp16x16;
mod is_pos_inf_fp8x23;
mod is_neg_inf_fp8x23;
mod is_pos_inf_i32;
mod is_neg_inf_i32;
mod is_pos_inf_i8;
mod is_neg_inf_i8;
mod reduce_log_sum_fp8x23_export_do_not_keepdims;
mod reduce_log_sum_fp8x23_export_keepdims;
mod reduce_log_sum_fp8x23_export_negative_axes_keepdims;
mod reduce_log_sum_fp16x16_export_do_not_keepdims;
mod reduce_log_sum_fp16x16_export_keepdims;
mod reduce_log_sum_fp16x16_export_negative_axes_keepdims;
mod and_bool;
mod erf_fp16x16;
mod erf_fp8x23;
mod unique_fp16x16_without_axis_sorted;
mod unique_fp16x16_with_axis_zero_sorted;
mod unique_u32_without_axis_sorted;
mod unique_u32_without_axis_not_sorted;
mod unique_u32_with_axis_zero_sorted;
mod unique_u32_with_axis_zero_not_sorted;
mod unique_u32_with_axis_one_sorted;
mod unique_u32_with_axis_one_not_sorted;
mod gather_nd_fp16x16_3d_default;
mod gather_nd_fp16x16_3d_batch_dims1;
mod gather_nd_fp16x16_3d_batch_dims2;
mod gather_nd_fp8x23_3d_default;
mod gather_nd_fp8x23_3d_batch_dims1;
mod gather_nd_fp8x23_3d_batch_dims2;
mod gather_nd_i32_3d_default;
mod gather_nd_i32_3d_batch_dims1;
mod gather_nd_i32_3d_batch_dims2;
mod gather_nd_i8_3d_default;
mod gather_nd_i8_3d_batch_dims1;
mod gather_nd_u32_default;
mod gather_nd_u32_batch_dims1;
mod gather_nd_u32_batch_dims2;
mod resize_upsample_scales_nearest;
mod resize_downsample_scales_cubic;
mod resize_downsample_scales_cubic_A_n0p5_exclude_outside;
mod resize_downsample_scales_cubic_align_corners;
mod resize_upsample_scales_linear;
mod resize_downsample_scales_linear_align_corners;
mod resize_downsample_scales_nearest;
mod resize_upsample_scales_cubic;
mod resize_upsample_scales_cubic_A_n0p5_exclude_outside;
mod resize_upsample_scales_cubic_align_corners;
mod resize_upsample_scales_cubic_asymmetric;
mod resize_upsample_scales_linear_align_corners;
mod resize_upsample_sizes_nearest;
mod resize_upsample_sizes_cubic;
mod resize_downsample_sizes_cubic;
mod resize_downsample_sizes_nearest;
mod resize_upsample_scales_linear_half_pixel_symmetric;
mod resize_downsample_scales_cubic_antialias;
mod resize_downsample_scales_linear_antialias;
mod resize_downsample_sizes_cubic_antialias;
mod resize_downsample_sizes_linear_pytorch_half_pixel;
mod resize_tf_crop_and_resize;
mod resize_tf_crop_and_resize_extrapolation_value;
mod resize_upsample_scales_nearest_axes_2_3;
mod resize_upsample_scales_nearest_axes_3_2;
mod resize_upsample_sizes_nearest_axes_2_3;
mod resize_upsample_sizes_nearest_ceil_half_pixel;
mod resize_upsample_sizes_nearest_floor_align_corners;
mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric;
mod resize_downsample_scales_linear_half_pixel_symmetric;
mod resize_downsample_sizes_nearest_not_larger;
mod resize_downsample_sizes_nearest_not_smaller;
mod resize_tf_crop_and_resize_axes_2_3;
mod resize_tf_crop_and_resize_axes_3_2;
mod resize_upsample_sizes_nearest_axes_3_2;
mod resize_upsample_sizes_nearest_not_larger;
mod resize_upsample_sizes_nearest_not_smaller;
mod compress_fp16x16_3d_default;
mod compress_fp16x16_3d_axis1;
mod compress_fp16x16_3d_axis2;
mod compress_fp16x16_3d_axis3;
mod compress_fp16x16_3d_noaxis;
mod compress_fp8x23_3d_default;
mod compress_fp8x23_3d_axis1;
mod compress_fp8x23_3d_axis2;
mod compress_i32_3d_default;
mod compress_i32_3d_axis1;
mod compress_i32_3d_axis2;
mod compress_i8_3d_default;
mod compress_i8_3d_axis1;
mod compress_i8_3d_axis2;
mod compress_u32_3d_default;
mod compress_u32_3d_axis1;
mod compress_u32_3d_axis2;
mod compress_u32_3d_axis2_2;
mod compress_u32_3d_axis3;
mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims;
mod reduce_log_sum_exp_fp32x32_export_keepdims;
mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims;
mod layer_normalization_default_axis;
mod layer_normalization_4d_axis0;
mod layer_normalization_4d_axis1;
mod layer_normalization_4d_axis2;
mod layer_normalization_4d_axis3;
mod layer_normalization_3d_axis0_epsilon;
mod layer_normalization_3d_axis_negative_3_epsilon;
mod layer_normalization_3d_axis1_epsilon;
mod layer_normalization_3d_axis2_epsilon;
mod layer_normalization_4d_axis_negative_4;
mod layer_normalization_4d_axis_negative_3;
mod layer_normalization_4d_axis_negative_2;
mod layer_normalization_4d_axis_negative_1;
mod layer_normalization_3d_axis_negative_2_epsilon;
mod layer_normalization_3d_axis_negative_1_epsilon;
mod layer_normalization_test;
mod split_u32_1d_equal_parts;
mod split_u32_2d_equal_parts;
mod split_u32_zero_size;
mod split_u32_1d_variable_parts;
mod split_u32_2d_variable_parts;
mod split_u32_1d_uneven;
mod split_u32_2d_uneven;
mod split_fp16x16_1d_equal_parts;
mod split_fp16x16_1d_variable_parts;
mod split_fp16x16_2d_equal_parts;
mod split_fp16x16_2d_variable_parts;
mod split_fp16x16_zero_size;
mod split_fp16x16_1d_uneven;
mod split_fp16x16_2d_uneven;
mod grid_sample;
mod grid_sample_cubic;
mod grid_sample_aligncorners;
mod grid_sample_nearest;
mod grid_sample_nearest_aligncorner;
mod grid_sample_padding_border;
mod grid_sample_padding_reflection;
mod grid_sample_padding_zeros;
mod col2im;
mod col2im_5D;
mod col2im_dilations;
mod col2im_pads;
mod col2im_strides;
mod random_uniform_like_fp16x16;
mod random_uniform_like_fp8x23;
mod range_fp8x23;
mod range_fp16x16;
mod range_i32;
mod range_i8;
mod range_u32;
mod hann_window_fp8x23;
mod hann_window_fp16x16;
mod hamming_window_fp16x16;
mod hamming_window_fp8x23;
mod blackman_window_fp16x16;
mod blackman_window_fp8x23;
mod split_to_sequence_fp16x16_1d_equal_parts;
mod split_to_sequence_fp16x16_1d_variable_parts;
mod split_to_sequence_fp16x16_2d_equal_parts;
mod split_to_sequence_fp16x16_2d_variable_parts;
mod split_to_sequence_fp16x16_zero_size;
mod split_to_sequence_fp16x16_1d_uneven;
mod split_to_sequence_fp16x16_2d_uneven;
mod split_to_sequence_u32_1d_equal_parts;
mod split_to_sequence_u32_1d_variable_parts;
mod split_to_sequence_u32_2d_equal_parts;
mod split_to_sequence_u32_2d_variable_parts;
mod split_to_sequence_u32_zero_size;
mod split_to_sequence_u32_1d_uneven;
mod split_to_sequence_u32_2d_uneven;
mod split_to_sequence_2d_scalar;
mod split_to_sequence_2d_nokeepdims;
mod split_to_sequence_1d_nokeepdims;
mod reverse_sequence_fp16x16_batch_equal_parts;
mod reverse_sequence_fp16x16_time_equal_parts;
mod reverse_sequence_i32_batch_equal_parts;
mod reverse_sequence_i32_time_equal_parts;
mod reverse_sequence_i8_batch_equal_parts;
mod reverse_sequence_i8_time_equal_parts;
mod reverse_sequence_u32_4x4_batch;
mod reverse_sequence_u32_4x4_time;
mod reverse_sequence_u32_3x3_batch;
mod reverse_sequence_u32_3x3_time;
mod reverse_sequence_different_dimensions_4_5;
mod reverse_sequence_different_dimensions_2_4;
mod reverse_sequence_different_dimensions_1_6;
mod reverse_sequence_different_dimensions_3x9_batch;
mod reverse_sequence_different_dimensions_3x9_time;
mod conv_transpose;
mod conv_transpose_1d;
mod conv_transpose_3d;
mod conv_transpose_attributes;
mod conv_transpose_autopad_same;
mod conv_transpose_dilations;
mod conv_transpose_pads;
mod conv_transpose_group_2;
mod conv_transpose_group_2_image_3;
mod depth_to_space_fp16x16;
mod depth_to_space_fp8x23;
mod depth_to_space_i32;
mod depth_to_space_i8;
mod depth_to_space_u32;
mod space_to_depth_fp16x16;
mod space_to_depth_fp8x23;
mod space_to_depth_i32;
mod space_to_depth_i8;
mod space_to_depth_u32;
mod scatter_nd_fp16x16_3d_default;
mod scatter_nd_fp16x16_3d_add;
mod scatter_nd_fp16x16_3d_mul;
mod scatter_nd_fp16x16_3d_max;
mod scatter_nd_fp16x16_3d_min;
mod scatter_nd_fp8x23_3d_default;
mod scatter_nd_fp8x23_3d_add;
mod scatter_nd_fp8x23_3d_mul;
mod scatter_nd_fp8x23_3d_max;
mod scatter_nd_fp8x23_3d_min;
mod scatter_nd_u32_default;
mod scatter_nd_u32_add;
mod scatter_nd_u32_mul;
mod scatter_nd_u32_max;
mod scatter_nd_u32_min;
mod conv_2D_with_padding;
mod conv_1D_no_padding;
mod conv_1D_with_padding;
mod conv_3D_no_padding;
mod conv_3D_with_padding;
mod conv_4D_no_padding;
mod conv_2D_with_2_groups;
mod conv_2D_with_autopad_same;
mod conv_2D_with_strides_asymmetric_padding;
mod conv_2D_with_strides_with_padding;
mod conv_4D_with_padding;
mod label_encoder_fp16x16_3d_default;
mod label_encoder_fp8x23_default;
mod label_encoder_i8_default;
mod label_encoder_i32_default;
mod label_encoder_u32_default;
mod gather_fp16x16_3d_default;
mod gather_fp16x16_3d_axis1;
mod gather_fp16x16_3d_axis2;
mod gather_negative_indices;
mod gather_negative_axis;
mod less_fp16x16;
mod less_fp16x16_broadcast;
mod less_fp8x23;
mod less_fp8x23_broadcast;
mod less_i32;
mod less_i32_broadcast;
mod less_i8;
mod less_i8_broadcast;
mod less_u32;
mod less_u32_broadcast;
mod reshape_extended_dims;
mod reshape_negative_dim;
mod reshape_negative_extended_dims;
mod reshape_one_dim;
mod reshape_reduced_dims;
mod reshape_reordered_all_dims;
mod reshape_reordered_last_dims;
mod reshape_zero_and_negative_dim;
mod reshape_zero_dim;
mod reduce_sum_default_axes_keepdims;
mod reduce_sum_empty_axes_input_noop;
mod reduce_sum_keep_dims;
mod reduce_sum_negative_axes_keepdims;
mod reduce_sum_no_keep_dims;
mod gather_elements_default;
mod gather_elements_axis1;
mod gather_elements_axis2;
mod gather_elements_negative_indices;
mod softmax_axis_0;
mod softmax_axis_1;
mod softmax_axis_2;
mod softmax_axis_minus_1;
mod argmax_default_axes_keepdims;
mod argmax_default_axes_keepdims_select_last_index;
mod argmax_keepdims;
mod argmax_keepdims_select_last_index;
mod argmax_negative_axis_keepdims;
mod argmax_negative_axis_keepdims_select_last_index;
mod argmax_no_keepdims;
mod argmax_no_keepdims_select_last_index;
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp16x16.cairo | mod input_0;
mod output_0;
use orion::operators::tensor::FP16x16Tensor;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
// Verifies that element-wise absolute value on an FP16x16 tensor
// reproduces the precomputed reference output exactly.
#[test]
#[available_gas(2000000000)]
fn test_abs_fp16x16() {
    let input = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(input.abs(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp16x16/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP16x16Tensor;
use orion::numbers::{FixedTrait, FP16x16};
/// Reference input for the FP16x16 `abs` node test:
/// a 2x2 tensor containing both positive and negative fixed-point values.
fn input_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 3670016, sign: false },
        FP16x16 { mag: 7208960, sign: true },
        FP16x16 { mag: 3014656, sign: true },
        FP16x16 { mag: 3932160, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp16x16/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP16x16Tensor;
use orion::numbers::{FixedTrait, FP16x16};
/// Expected output for the FP16x16 `abs` node test:
/// same magnitudes as the input tensor, all signs cleared.
fn output_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 3670016, sign: false },
        FP16x16 { mag: 7208960, sign: false },
        FP16x16 { mag: 3014656, sign: false },
        FP16x16 { mag: 3932160, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp8x23.cairo | mod input_0;
mod output_0;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP8x23Tensor;
use orion::operators::tensor::FP8x23TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
// Verifies that element-wise absolute value on an FP8x23 tensor
// reproduces the precomputed reference output exactly.
#[test]
#[available_gas(2000000000)]
fn test_abs_fp8x23() {
    let input = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(input.abs(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp8x23/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP8x23Tensor;
use orion::numbers::{FixedTrait, FP8x23};
/// Reference input for the FP8x23 `abs` node test:
/// a 2x2 tensor containing both positive and negative fixed-point values.
fn input_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 738197504, sign: false },
        FP8x23 { mag: 58720256, sign: false },
        FP8x23 { mag: 285212672, sign: true },
        FP8x23 { mag: 226492416, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_fp8x23/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP8x23Tensor;
use orion::numbers::{FixedTrait, FP8x23};
/// Reference output for the abs(FP8x23) node test: input magnitudes, all signs cleared.
fn output_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 738197504, sign: false },
        FP8x23 { mag: 58720256, sign: false },
        FP8x23 { mag: 285212672, sign: false },
        FP8x23 { mag: 226492416, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i32.cairo | mod input_0;
mod output_0;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::I32Tensor;
use orion::operators::tensor::I32TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
#[test]
#[available_gas(2000000000)]
fn test_abs_i32() {
    // abs() over the generated i32 input must match the precomputed reference tensor.
    let x = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(x.abs(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i32/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I32Tensor;
/// Generated input for the abs(i32) node test: a 2x2 tensor with mixed signs.
fn input_0() -> Tensor<i32> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<i32> = array![-35, 106, 91, -12];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i32/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I32Tensor;
/// Reference output for the abs(i32) node test: absolute values of the input.
fn output_0() -> Tensor<i32> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<i32> = array![35, 106, 91, 12];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i8.cairo | mod input_0;
mod output_0;
use orion::operators::tensor::I8TensorPartialEq;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::I8Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
#[test]
#[available_gas(2000000000)]
fn test_abs_i8() {
    // abs() over the generated i8 input must match the precomputed reference tensor.
    let x = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(x.abs(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i8/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I8Tensor;
/// Generated input for the abs(i8) node test: a 2x2 tensor with mixed signs.
fn input_0() -> Tensor<i8> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<i8> = array![-85, 100, -90, -40];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/abs_i8/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I8Tensor;
/// Reference output for the abs(i8) node test: absolute values of the input.
fn output_0() -> Tensor<i8> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<i8> = array![85, 100, 90, 40];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp16x16.cairo | mod input_0;
mod output_0;
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
use orion::utils::{assert_eq, assert_seq_eq};
#[test]
#[available_gas(2000000000)]
fn test_acos_fp16x16() {
    // acos() over the generated input must match the precomputed reference tensor.
    let x = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(x.acos(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp16x16/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated input for the acos(FP16x16) node test: a 2x2 tensor of values in [-1, 1].
fn input_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 51202, sign: true },
        FP16x16 { mag: 49944, sign: false },
        FP16x16 { mag: 18761, sign: false },
        FP16x16 { mag: 64655, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp16x16/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use orion::numbers::{FixedTrait, FP16x16};
/// Reference output for the acos(FP16x16) node test.
fn output_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 161711, sign: false },
        FP16x16 { mag: 46154, sign: false },
        FP16x16 { mag: 83915, sign: false },
        FP16x16 { mag: 195133, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp8x23.cairo | mod input_0;
mod output_0;
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::operators::tensor::FP8x23TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
use core::array::{ArrayTrait, SpanTrait};
use orion::utils::{assert_eq, assert_seq_eq};
#[test]
#[available_gas(2000000000)]
fn test_acos_fp8x23() {
    // acos() over the generated input must match the precomputed reference tensor.
    let x = input_0::input_0()
;
    let expected = output_0::output_0();
    assert_eq(x.acos(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp8x23/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::numbers::{FixedTrait, FP8x23};
/// Generated input for the acos(FP8x23) node test: a 2x2 tensor of values in [-1, 1].
fn input_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 3764690, sign: false },
        FP8x23 { mag: 556457, sign: true },
        FP8x23 { mag: 529360, sign: false },
        FP8x23 { mag: 2252561, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acos_fp8x23/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::numbers::{FixedTrait, FP8x23};
/// Reference output for the acos(FP8x23) node test.
fn output_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 9272682, sign: false },
        FP8x23 { mag: 13733660, sign: false },
        FP8x23 { mag: 12647081, sign: false },
        FP8x23 { mag: 15457344, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp16x16.cairo | mod input_0;
mod output_0;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use core::array::{ArrayTrait, SpanTrait};
#[test]
#[available_gas(2000000000)]
fn test_acosh_fp16x16() {
    // acosh() over the generated input must match the precomputed reference tensor.
    let x = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(x.acosh(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp16x16/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated input for the acosh(FP16x16) node test: a 2x2 tensor of values >= 1.
fn input_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 278211, sign: false },
        FP16x16 { mag: 184787, sign: false },
        FP16x16 { mag: 83173, sign: false },
        FP16x16 { mag: 258400, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp16x16/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub};
use orion::numbers::{FixedTrait, FP16x16};
/// Reference output for the acosh(FP16x16) node test.
fn output_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 139248, sign: false },
        FP16x16 { mag: 111195, sign: false },
        FP16x16 { mag: 47062, sign: false },
        FP16x16 { mag: 134255, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp8x23.cairo | mod input_0;
mod output_0;
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::FP8x23TensorPartialEq;
use core::array::{ArrayTrait, SpanTrait};
#[test]
#[available_gas(2000000000)]
fn test_acosh_fp8x23() {
    // acosh() over the generated input must match the precomputed reference tensor.
    let x = input_0::input_0();
    let expected = output_0::output_0();
    assert_eq(x.acosh(), expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp8x23/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::numbers::{FixedTrait, FP8x23};
/// Generated input for the acosh(FP8x23) node test: a 2x2 tensor of values >= 1.
fn input_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 11868883, sign: false },
        FP8x23 { mag: 28161016, sign: false },
        FP8x23 { mag: 27794185, sign: false },
        FP8x23 { mag: 28651727, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/acosh_fp8x23/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub};
use orion::numbers::{FixedTrait, FP8x23};
/// Reference output for the acosh(FP8x23) node test.
fn output_0() -> Tensor<FP8x23> {
    let shape: Array<usize> = array![2, 2];
    let data: Array<FP8x23> = array![
        FP8x23 { mag: 7399094, sign: false },
        FP8x23 { mag: 15781079, sign: false },
        FP8x23 { mag: 15665784, sign: false },
        FP8x23 { mag: 15932759, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16.cairo | mod input_0;
mod input_1;
mod output_0;
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use core::array::{ArrayTrait, SpanTrait};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
#[test]
#[available_gas(2000000000)]
fn test_add_fp16x16() {
    // Element-wise tensor addition must match the precomputed reference tensor.
    let lhs = input_0::input_0();
    let rhs = input_1::input_1();
    let expected = output_0::output_0();
    assert_eq(lhs + rhs, expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated left-hand input for the add(FP16x16) node test: a 3x3x3 tensor.
fn input_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![3, 3, 3];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 131072, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16/input_1.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated right-hand input for the add(FP16x16) node test: a 3x3x3 tensor.
fn input_1() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![3, 3, 3];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Reference output for the add(FP16x16) node test: input_0 + input_1, element-wise.
fn output_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![3, 3, 3];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 262144, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 393216, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 262144, sign: true },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16_broadcast.cairo | mod input_0;
mod input_1;
mod output_0;
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use core::array::{ArrayTrait, SpanTrait};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor};
#[test]
#[available_gas(2000000000)]
fn test_add_fp16x16_broadcast() {
    // Broadcasting addition (3x3x3 + 1x3x1) must match the precomputed reference tensor.
    let lhs = input_0::input_0();
    let rhs = input_1::input_1();
    let expected = output_0::output_0();
    assert_eq(lhs + rhs, expected);
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16_broadcast/input_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated left-hand input for the broadcast add(FP16x16) node test: a 3x3x3 tensor.
fn input_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![3, 3, 3];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16_broadcast/input_1.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Generated right-hand input for the broadcast add(FP16x16) node test: a 1x3x1 tensor.
fn input_1() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![1, 3, 1];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 131072, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp16x16_broadcast/output_0.cairo | use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FixedTrait, FP16x16};
/// Reference output for the broadcast add(FP16x16) node test: input_0 + input_1 broadcast over axis 1.
fn output_0() -> Tensor<FP16x16> {
    let shape: Array<usize> = array![3, 3, 3];
    let data: Array<FP16x16> = array![
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 196608, sign: true },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 262144, sign: false },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 131072, sign: true },
        FP16x16 { mag: 262144, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 262144, sign: false },
        FP16x16 { mag: 262144, sign: false },
        FP16x16 { mag: 131072, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 262144, sign: true },
        FP16x16 { mag: 262144, sign: true },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 196608, sign: false },
        FP16x16 { mag: 196608, sign: false },
    ];
    TensorTrait::new(shape.span(), data.span())
}
| https://github.com/gizatechxyz/orion |
tests/nodes/add_fp8x23.cairo | mod input_0;
mod input_1;
mod output_0;
use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd};
use orion::operators::tensor::FP8x23TensorPartialEq;
use core::array::{ArrayTrait, SpanTrait};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::{TensorTrait, Tensor};
#[test]
#[available_gas(2000000000)]
fn test_add_fp8x23() {
    // Element-wise tensor addition must match the precomputed reference tensor.
    let lhs = input_0::input_0();
    let rhs = input_1::input_1();
    let expected = output_0::output_0();
    assert_eq(lhs + rhs, expected);
}
| https://github.com/gizatechxyz/orion |