docgen/src/main.rs
use regex::Regex;
use std::fs;
use std::path::Path;

fn main() {
    // TENSOR DOC
    let trait_path = "src/operators/tensor/core.cairo";
    let doc_path = "docs/framework/operators/tensor";
    let label = "tensor";
    let trait_name = "TensorTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // NN DOC
    let trait_path = "src/operators/nn/core.cairo";
    let doc_path = "docs/framework/operators/neural-network";
    let label = "nn";
    let trait_name = "NNTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // SEQUENCE DOC
    let trait_path = "src/operators/sequence/core.cairo";
    let doc_path = "docs/framework/operators/sequence";
    let label = "sequence";
    let trait_name = "SequenceTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // FIXED POINT DOC
    let trait_path = "src/numbers/fixed_point/core.cairo";
    let doc_path = "docs/framework/numbers/fixed-point";
    let label = "fp";
    let trait_name = "FixedTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // COMPLEX NUMBER DOC
    let trait_path = "src/numbers/complex_number/complex_trait.cairo";
    let doc_path = "docs/framework/numbers/complex-number";
    let label = "complex";
    let trait_name: &str = "ComplexTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // TREE ENSEMBLE CLASSIFIER DOC
    let trait_path = "src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo";
    let doc_path = "docs/framework/operators/machine-learning/tree-ensemble-classifier";
    let label = "tree_ensemble_classifier";
    let trait_name: &str = "TreeEnsembleClassifierTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // TREE ENSEMBLE REGRESSOR DOC
    let trait_path = "src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo";
    let doc_path = "docs/framework/operators/machine-learning/tree-ensemble-regressor";
    let label = "tree_ensemble_regressor";
    let trait_name: &str = "TreeEnsembleRegressorTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // LINEAR REGRESSOR DOC
    let trait_path = "src/operators/ml/linear/linear_regressor.cairo";
    let doc_path = "docs/framework/operators/machine-learning/linear-regressor";
    let label = "linear_regressor";
    let trait_name: &str = "LinearRegressorTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // LINEAR CLASSIFIER DOC
    let trait_path = "src/operators/ml/linear/linear_classifier.cairo";
    let doc_path = "docs/framework/operators/machine-learning/linear-classifier";
    let label = "linear_classifier";
    let trait_name: &str = "LinearClassifierTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // SVM REGRESSOR DOC
    let trait_path = "src/operators/ml/svm/svm_regressor.cairo";
    let doc_path = "docs/framework/operators/machine-learning/svm-regressor";
    let label = "svm_regressor";
    let trait_name: &str = "SVMRegressorTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // SVM CLASSIFIER DOC
    let trait_path = "src/operators/ml/svm/svm_classifier.cairo";
    let doc_path = "docs/framework/operators/machine-learning/svm-classifier";
    let label = "svm_classifier";
    let trait_name: &str = "SVMClassifierTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);

    // NORMALIZER DOC
    let trait_path = "src/operators/ml/normalizer/normalizer.cairo";
    let doc_path = "docs/framework/operators/machine-learning/normalizer";
    let label = "normalizer";
    let trait_name: &str = "NormalizerTrait";
    doc_trait(trait_path, doc_path, label);
    doc_functions(trait_path, doc_path, trait_name, label);
}

fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
    // Open and read the trait's core .cairo file
    let path_str = format!("../{}", trait_path);
    let path = Path::new(&path_str);
    let contents = fs::read_to_string(&path).expect("Could not read the file");

    // Create a regular expression to match the comment lines
    let re = Regex::new(r#"/// (\w+) - (.*)"#).unwrap();

    // Initialize an empty string to store our new formatted table
    let mut table = String::from("| function | description |\n| --- | --- |\n");

    // Go through the file and look for comments with our specific format
    for cap in re.captures_iter(&contents) {
        // Check if the function is the Trait definition and skip it
        if &cap[1] == "Trait" {
            continue;
        }

        // Add the function name and description to our table
        let func_name = format!(
            "[`{}.{}`]({}.{}.md)",
            label,
            &cap[1],
            label,
            &cap[1].replace('_', r"\_")
        );
        let func_desc = &cap[2];
        table += &format!("| {} | {} |\n", func_name, func_desc);
    }

    // Open the README.md file
    let readme_path_str = format!("../{}/README.md", doc_path);
    let readme_path = Path::new(&readme_path_str);
    let readme = fs::read_to_string(&readme_path).expect("Could not read the file");

    // Use regex to replace the table
    let re_table = Regex::new(r"(?ms)\n\n\| fun.*?(\n[^|]|\z)").unwrap();
    let new_readme = re_table.replace(&readme, &("\n\n".to_owned() + &table + "\n"));

    // Write the updated contents back to README.md
    fs::write(&readme_path, &*new_readme).expect("Could not write the file");
}

fn doc_functions(trait_path: &str, doc_path: &str, trait_name: &str, label: &str) {
    let filepath_str = format!("../{}", trait_path);
    let filepath = Path::new(&filepath_str);
    let contents = fs::read_to_string(filepath).expect("Something went wrong reading the file");

    // Find the trait block
    let trait_re = Regex::new(&format!(
        r"(?s)trait\s+{}\s*(<[\w\s,]*>)?\s*\{{.*?\n\s*\}}",
        trait_name
    ))
    .unwrap();
    let trait_match = trait_re.captures(&contents).unwrap();
    let trait_block = trait_match.get(0).unwrap().as_str();

    // Iterate over each function
    let func_re = Regex::new(r"(?s)(///.*?\n)\s*fn (\w+)\((.*?)\) -> (.*?);").unwrap();

    for func_match in func_re.captures_iter(trait_block) {
        let func_name = func_match.get(2).unwrap().as_str();
        let doc_comment = func_match.get(1).unwrap().as_str();

        // Go to the appropriate markdown file and write the transformed doc comment
        let markdown_filename = format!("../{}/{}.{}.md", doc_path, label, func_name);

        let transformed_comment = doc_comment
            .lines()
            .map(|line| {
                line.trim_start().strip_prefix("/// ").unwrap_or(
                    line.trim_start()
                        .strip_prefix("///")
                        .unwrap_or(line.trim_start()),
                )
            })
            .collect::<Vec<_>>()
            .join("\n");

        // Write or replace the transformed comment into the markdown file
        fs::write(markdown_filename, transformed_comment).expect("Unable to write file");
    }
}
https://github.com/gizatechxyz/orion
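For reference, a hedged Python sketch (not part of the repo) of the summary-line convention doc_trait depends on: each operator in a trait file carries a `/// name - description` line, which the regex above turns into one row of the README table, skipping the `Trait` entry itself. The sample comment lines below are invented for illustration.

import re

# Invented sample of the `/// name - description` lines doc_trait scans for.
sample = """
/// Trait - Core tensor operations.
/// new - Constructs a new tensor from a shape and a data span.
/// reduce_sum - Sums the elements of the tensor along an axis.
"""

re_summary = re.compile(r"/// (\w+) - (.*)")
for name, desc in re_summary.findall(sample):
    if name == "Trait":  # the trait's own summary line is skipped, as in doc_trait
        continue
    escaped = name.replace("_", "\\_")  # underscores are escaped in the link target
    print(f"| [`tensor.{name}`](tensor.{escaped}.md) | {desc} |")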
nodegen/__init__.py
https://github.com/gizatechxyz/orion
nodegen/file_manager.py
import os
from pathlib import Path

BASE_PATH = "./tests/nodes"


class ModFile:
    def __init__(self):
        """
        Initialize a ModFile object.

        This method creates a new file with a .cairo extension in the BASE_PATH
        directory. If the directory doesn't exist, it's created. The contents of the
        file are then read into the buffer attribute.
        """
        self.path = Path(f"{BASE_PATH}.cairo")
        self.path.parent.mkdir(parents=True, exist_ok=True)
        with self.path.open("r") as f:
            self.buffer = f.readlines()

    def update(self, name: str):
        """
        Update the .cairo file with a new module statement.

        Args:
            name (str): The name of the module to be added.

        This method checks if a module statement for the given name already exists in
        the buffer. If it doesn't, the new module statement is appended to the file.
        """
        statement = f"mod {name};"
        if any([line.startswith(statement) for line in self.buffer]):
            return

        with self.path.open("a") as f:
            f.write(f"{statement}\n")


class File:
    def __init__(self, path: str):
        """
        Initialize a File object.

        Args:
            path (str): The file path where the File object will operate.

        This method creates a new file at the specified path. If the file already
        exists, its contents are read into the buffer attribute.
        """
        self.path = Path(path)
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.buffer = []

        if os.path.isfile(path):
            with self.path.open("r") as f:
                self.buffer = f.readlines()

    def dump(self):
        """
        Write the contents of the buffer to the file.

        This method writes each line in the buffer to the file, ensuring each line is
        properly terminated with a newline character.
        """
        with self.path.open("w") as f:
            f.writelines([f"{line}\n" for line in self.buffer])


class CairoTest(File):
    def __init__(self, file: str):
        super().__init__(os.path.join(BASE_PATH, file))

    @classmethod
    def base_template(
        cls, name: str, arg_cnt: int, refs: list[str], func_sig: str, out_cnt: int = 1
    ) -> list[str]:
        """
        Create a template for a Cairo test function which expects a tensor output.

        Args:
            name (str): Name of the test function.
            arg_cnt (int): Number of arguments for the function.
            refs (list[str]): List of references (modules) to be used in the function.
            func_sig (str): The function signature.
            out_cnt (int): Number of outputs for the function. Defaults to 1.

        Returns:
            list[str]: A list of strings that together form the template of a Cairo
            test function.

        This method generates a list of strings that form the template of a Cairo test
        function, including module imports, function definition, and assertions.
        """
        template = [
            *[f"mod input_{i};" for i in range(arg_cnt)],
            *[f"mod output_{i};" for i in range(out_cnt)],
            "",
            "",
            *[f"use {ref};" for ref in refs],
            "",
            "#[test]",
            "#[available_gas(2000000000)]",
            f"fn test_{name}()" + " {",
            *[f"    let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)],
            *[f"    let z_{i} = output_{i}::output_{i}();" for i in range(out_cnt)],
            "",
        ]

        # Handling conditional function signature based on the number of outputs
        if out_cnt > 1:
            template.append(
                f"    let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};"
            )
        else:
            template.append(f"    let y_0 = {func_sig};")

        # Continue appending to the template
        template.extend([
            "",
            *[f"    assert_eq(y_{i}, z_{i});" for i in range(out_cnt)],
            "}",
        ])

        return template

    @classmethod
    def sequence_template(
        cls, name: str, arg_cnt: int, refs: list[str], func_sig: str
    ) -> list[str]:
        """
        Create a template for a Cairo test function which expects a tensor sequence.

        Args:
            name (str): Name of the test function.
            arg_cnt (int): Number of arguments for the function.
            refs (list[str]): List of references (modules) to be used in the function.
            func_sig (str): The function signature.

        Returns:
            list[str]: A list of strings that together form the template of a Cairo
            test function.

        This method generates a list of strings that form the template of a Cairo test
        function, including module imports, function definition, and assertions.
        """
        return [
            *[f"mod input_{i};" for i in range(arg_cnt)],
            "mod output_0;",
            "",
            "",
            *[f"use {ref};" for ref in refs],
            "",
            "#[test]",
            "#[available_gas(2000000000)]",
            f"fn test_{name}()" + " {",
            *[f"    let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)],
            "    let z = output_0::output_0();",
            "",
            f"    let y = {func_sig};",
            "",
            "    assert_seq_eq(y, z);",
            "}",
        ]


class CairoData(File):
    def __init__(self, file: str):
        super().__init__(os.path.join(BASE_PATH, file))

    @classmethod
    def base_template(
        cls, func: str, dtype: str, refs: list[str], data: list[str], shape: tuple
    ) -> list[str]:
        """
        Create a base template for data representation in Cairo.

        Args:
            func (str): The function name.
            dtype (str): The data type of the tensor.
            refs (list[str]): A list of module references.
            data (list[str]): The data to be included in the tensor.
            shape (tuple): The shape of the tensor.

        Returns:
            list[str]: A list of strings that together form the template of a data
            function in Cairo.

        This method generates a list of strings representing a function in Cairo for
        data handling, defining the shape and contents of a tensor.
        """
        template = [
            *[f"use {ref};" for ref in refs],
            "",
            f"fn {func}() -> Tensor<{dtype}>" + " {",
            "    let mut shape = ArrayTrait::<usize>::new();",
            *[f"    shape.append({s});" for s in shape],
            "",
            "    let mut data = ArrayTrait::new();",
            *[f"    data.append({d});" for d in data],
            "    TensorTrait::new(shape.span(), data.span())",
            "}",
        ]

        return template

    @classmethod
    def sequence_template(
        cls,
        func: str,
        dtype: str,
        refs: list[str],
        data: list[list[str]],
        shape: list[tuple],
    ) -> list[str]:
        """
        Create a template for handling tensor sequences in Cairo.

        Args:
            func (str): The function name.
            dtype (str): The data type of the tensor sequence.
            refs (list[str]): A list of module references.
            data (list[list[str]]): The data to be included in each tensor.
            shape (list[tuple]): The shapes of each tensor in the sequence.

        Returns:
            list[str]: A list of strings that together form the template of a sequence
            tensor function in Cairo.

        This method generates a list of strings representing a function in Cairo for
        handling a sequence of tensors, each with its own data and shape.
        """

        def expand_sequence_init(s: list[tuple], d: list[list[str]]) -> list[str]:
            snippet = []
            for i in range(len(s)):
                snippet += [
                    "    let mut shape = ArrayTrait::<usize>::new();",
                    *[f"    shape.append({dim});" for dim in s[i]],
                    "",
                    "    let mut data = ArrayTrait::new();",
                    *[f"    data.append({elt});" for elt in d[i]],
                    "",
                    "    sequence.append(TensorTrait::new(shape.span(), data.span()));",
                    "",
                ]

            return snippet

        template = [
            *[f"use {ref};" for ref in refs],
            "",
            f"fn {func}() -> Array<Tensor<{dtype}>>" + " {",
            "    let mut sequence = ArrayTrait::new();",
            "",
            *expand_sequence_init(shape, data),
            "    sequence",
            "}",
        ]

        return template
https://github.com/gizatechxyz/orion
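A small usage sketch (hypothetical values, assuming the nodegen package is importable) of how these templates are driven; it prints the Cairo source that would be written to a data file under tests/nodes/.

from nodegen.file_manager import CairoData

# Render a 1x2 u32 tensor data file; refs trimmed to two imports for brevity.
lines = CairoData.base_template(
    func="input_0",
    dtype="u32",
    refs=[
        "core::array::{ArrayTrait, SpanTrait}",
        "orion::operators::tensor::{TensorTrait, Tensor}",
    ],
    data=["1", "2"],
    shape=(1, 2),
)
print("\n".join(lines))  # the body of tests/nodes/<name>/input_0.cairo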
nodegen/helpers.py
from enum import Enum
import os
from typing import List

import numpy as np

from .file_manager import CairoTest, CairoData, ModFile


class FixedImpl(Enum):
    FP8x23 = 'FP8x23'
    FP16x16 = 'FP16x16'
    FP32x32 = 'FP32x32'


def to_fp(x: np.ndarray, fp_impl: FixedImpl):
    match fp_impl:
        case FixedImpl.FP8x23:
            return (x * 2**23).astype(np.int64)
        case FixedImpl.FP16x16:
            return (x * 2**16).astype(np.int64)
        case FixedImpl.FP32x32:
            return (x * 2**32).astype(np.int64)


class Dtype(Enum):
    FP8x23 = 'FP8x23'
    FP16x16 = 'FP16x16'
    FP32x32 = 'FP32x32'
    I8 = 'i8'
    I32 = 'i32'
    U32 = 'u32'
    BOOL = 'bool'
    COMPLEX64 = 'complex64'


class Tensor:
    def __init__(self, dtype: Dtype, shape: tuple, data: np.ndarray):
        self.dtype = dtype
        self.shape = shape
        self.data = data


Sequence = List[Tensor]


class Trait(Enum):
    TENSOR = 'TENSOR'
    NN = 'NN'
    SEQUENCE = 'SEQUENCE'


def make_test(
    inputs: list[Tensor | Sequence],
    output: Tensor | Sequence,
    func_sig: str,
    name: str,
    trait: Trait = Trait.TENSOR,
):
    """
    Generate and write Cairo tests based on the provided inputs and output.

    Args:
        inputs (list[Tensor | list[Tensor]]): A list of input tensors or tensor
            sequences.
        output (Tensor | list[Tensor]): The expected output tensor or tensor sequences.
        func_sig (str): The signature of the function to be tested.
        name (str): The name of the test.
        trait (Trait, optional): The trait of the tensors. Defaults to Trait.TENSOR.
    """
    ModFile().update(name)

    for i, input in enumerate(inputs):
        input_data = CairoData(os.path.join(name, f"input_{i}.cairo"))
        match input:
            case list():
                input_data.buffer = CairoData.sequence_template(
                    func=f"input_{i}",
                    dtype=input[0].dtype.value,
                    refs=get_data_refs(input[0].dtype),
                    data=get_data_statement_for_sequences(input, input[0].dtype),
                    shape=[x.shape for x in input],
                )
            case Tensor():
                input_data.buffer = CairoData.base_template(
                    func=f"input_{i}",
                    dtype=input.dtype.value,
                    refs=get_data_refs(input.dtype),
                    data=get_data_statement(input.data, input.dtype),
                    shape=input.shape,
                )

        input_data.dump()

    match output:
        case list():
            output_data = CairoData(os.path.join(name, "output_0.cairo"))
            output_data.buffer = CairoData.sequence_template(
                func="output_0",
                dtype=output[0].dtype.value,
                refs=get_data_refs(output[0].dtype),
                data=get_data_statement_for_sequences(output, output[0].dtype),
                shape=[x.shape for x in output],
            )
            output_data.dump()
        case tuple():
            for i, out in enumerate(output):
                output_data = CairoData(os.path.join(name, f"output_{i}.cairo"))
                output_data.buffer = CairoData.base_template(
                    func=f"output_{i}",
                    dtype=out.dtype.value,
                    refs=get_data_refs(out.dtype),
                    data=get_data_statement(out.data, out.dtype),
                    shape=out.shape,
                )
                output_data.dump()
        case Tensor():
            output_data = CairoData(os.path.join(name, "output_0.cairo"))
            output_data.buffer = CairoData.base_template(
                func="output_0",
                dtype=output.dtype.value,
                refs=get_data_refs(output.dtype),
                data=get_data_statement(output.data, output.dtype),
                shape=output.shape,
            )
            output_data.dump()

    test_file = CairoTest(f"{name}.cairo")
    match output:
        case list():
            test_file.buffer = CairoTest.sequence_template(
                name=name,
                arg_cnt=len(inputs),
                refs=get_all_test_refs(find_all_types([*inputs, *output]), trait),
                func_sig=func_sig,
            )
        case Tensor():
            test_file.buffer = CairoTest.base_template(
                name=name,
                arg_cnt=len(inputs),
                refs=get_all_test_refs(find_all_types([*inputs, output]), trait),
                func_sig=func_sig,
            )
        case tuple():
            test_file.buffer = CairoTest.base_template(
                name=name,
                arg_cnt=len(inputs),
                out_cnt=len(output),
                refs=get_all_test_refs(find_all_types([*inputs, output]), trait),
                func_sig=func_sig,
            )

    test_file.dump()


def get_data_refs(dtype: Dtype) -> list[str]:
    refs = [
        *trait_to_ref[Trait.TENSOR],
        *dtype_to_tensor[dtype],
        *dtype_to_numbers[dtype],
    ]

    return refs


def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
    match dtype:
        case Dtype.U32:
            return [f"{int(x)}" for x in data.flatten()]
        case Dtype.I32:
            return [f"{int(x)}" for x in data.flatten()]
        case Dtype.I8:
            return [f"{int(x)}" for x in data.flatten()]
        case Dtype.FP8x23:
            return [
                "FP8x23 { " + f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} " + "}"
                for x in data.flatten()
            ]
        case Dtype.FP16x16:
            return [
                "FP16x16 { " + f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} " + "}"
                for x in data.flatten()
            ]
        case Dtype.FP32x32:
            return [
                "FP32x32 { " + f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} " + "}"
                for x in data.flatten()
            ]
        case Dtype.BOOL:
            return [str(x).lower() for x in data.flatten()]
        case Dtype.COMPLEX64:
            return [
                "complex64 { " + "real: FP64x64 { "
                + f"mag: {abs(int(np.real(x)))}, sign: {str(np.real(x) < 0).lower()} "
                + "} , img: FP64x64 { "
                + f"mag: {abs(int(np.imag(x)))}, sign: {str(np.imag(x) < 0).lower()} "
                + "} }"
                for x in data.flatten()
            ]


def get_data_statement_for_sequences(data: Sequence, dtype: Dtype) -> list[list[str]]:
    return [get_data_statement(x.data, dtype) for x in data]


def get_all_test_refs(dtypes: list[Dtype], trait: Trait) -> list[str]:
    refs = []
    for dtype in dtypes:
        # refs += [*dtype_to_numbers[dtype]]
        refs += get_test_refs(dtype, trait)

    return list(set(refs))


def get_test_refs(dtype: Dtype, trait: Trait) -> list[str]:
    if trait == Trait.NN and dtype == Dtype.BOOL:
        raise Exception("NN trait does not support bool dtype")

    if trait == Trait.NN:
        dtype_ref = dtype_to_nn[dtype]
    elif trait == Trait.SEQUENCE:
        dtype_ref = dtype_to_sequence[dtype]
    else:
        dtype_ref = dtype_to_tensor[dtype]

    refs = [
        *trait_to_ref[trait],
        *dtype_ref,
        *dtype_to_partial_eq[dtype],
        "orion::utils::{assert_eq, assert_seq_eq}",
    ]

    return refs


def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
    dtypes = []
    for tensor in tensors:
        if isinstance(tensor, (list, tuple)):
            dtypes += [x.dtype for x in tensor]
        else:
            dtypes.append(tensor.dtype)

    return list(set(dtypes))


trait_to_ref = {
    Trait.TENSOR: [
        "core::array::{ArrayTrait, SpanTrait}",
        "orion::operators::tensor::{TensorTrait, Tensor}",
    ],
    Trait.NN: [
        "orion::numbers::FixedTrait",
        "orion::operators::nn::NNTrait",
    ],
    Trait.SEQUENCE: [
        "core::array::{ArrayTrait, SpanTrait}",
        "orion::operators::sequence::SequenceTrait",
    ],
}

dtype_to_tensor = {
    Dtype.U32: ["orion::operators::tensor::{U32Tensor, U32TensorAdd}"],
    Dtype.I32: ["orion::operators::tensor::{I32Tensor, I32TensorAdd}"],
    Dtype.I8: ["orion::operators::tensor::{I8Tensor, I8TensorAdd}"],
    Dtype.FP8x23: ["orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}"],
    Dtype.FP16x16: ["orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}"],
    Dtype.BOOL: ["orion::operators::tensor::BoolTensor"],
    Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor"],
    Dtype.FP32x32: ["orion::operators::tensor::FP32x32Tensor"],
}

dtype_to_nn = {
    Dtype.U32: ["orion::operators::nn::U32NN"],
    Dtype.I32: ["orion::operators::nn::I32NN"],
    Dtype.I8: ["orion::operators::nn::I8NN"],
    Dtype.FP8x23: ["orion::operators::nn::FP8x23NN"],
    Dtype.FP16x16: ["orion::operators::nn::FP16x16NN"],
}

dtype_to_sequence = {
    Dtype.U32: ["orion::operators::sequence::U32Sequence"],
    Dtype.I32: ["orion::operators::sequence::I32Sequence"],
    Dtype.I8: ["orion::operators::sequence::I8Sequence"],
    Dtype.FP8x23: ["orion::operators::sequence::FP8x23Sequence"],
    Dtype.FP16x16: ["orion::operators::sequence::FP16x16Sequence"],
}

dtype_to_partial_eq = {
    Dtype.U32: ["orion::operators::tensor::U32TensorPartialEq"],
    Dtype.I32: ["orion::operators::tensor::I32TensorPartialEq"],
    Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq"],
    Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq"],
    Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq"],
    Dtype.FP32x32: ["orion::operators::tensor::FP32x32TensorPartialEq"],
    Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq"],
    Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq"],
}

dtype_to_numbers = {
    Dtype.U32: ["orion::numbers::NumberTrait"],
    Dtype.I32: ["orion::numbers::NumberTrait"],
    Dtype.I8: ["orion::numbers::NumberTrait"],
    Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}"],
    Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}"],
    Dtype.FP32x32: ["orion::numbers::{FixedTrait, FP32x32}"],
    Dtype.BOOL: [],
    Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}"],
}
https://github.com/gizatechxyz/orion
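A quick check of the fixed-point scaling to_fp performs: a value x is stored as x * 2**16 in FP16x16 and x * 2**23 in FP8x23 (the int64 cast truncates toward zero rather than rounding).

import numpy as np

x = np.array([1.0, 0.5, -0.25])
print((x * 2**16).astype(np.int64))  # [ 65536  32768 -16384]   (FP16x16)
print((x * 2**23).astype(np.int64))  # [ 8388608  4194304 -2097152]   (FP8x23)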
nodegen/node/__init__.py
import argparse
import importlib
import os
import sys


class RunAll:
    @classmethod
    def run_all(cls):
        for method_name in dir(cls):
            if method_name.startswith('__') or method_name == 'run_all':
                continue
            method = getattr(cls, method_name)
            if callable(method):
                method()


# Add the path to the 'orion' directory to the Python path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))


def main():
    parser = argparse.ArgumentParser(description="Generate nodes.")
    parser.add_argument('node_class', help="The class of node to run.")
    args = parser.parse_args()

    class_name = args.node_class.capitalize()

    # Verify that the specified Python file exists
    filename = os.path.join('nodegen/node', args.node_class + '.py')
    if not os.path.exists(filename):
        print(f"Error: {filename} does not exist.")
        return

    # Import the module dynamically
    module = importlib.import_module('nodegen.node.' + args.node_class)

    # Get the class from the module
    node_class = getattr(module, class_name)

    # Instantiate the class and call the run_all method
    node_instance = node_class()
    node_instance.run_all()


if __name__ == "__main__":
    main()
https://github.com/gizatechxyz/orion
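A condensed sketch of the dynamic dispatch main() performs (names as in the source). Note that str.capitalize() lowercases everything after the first letter, which is why the multi-word generator below uses the class name Array_feature_extractor rather than ArrayFeatureExtractor.

import importlib

node = "abs"  # the CLI argument
module = importlib.import_module("nodegen.node." + node)
node_class = getattr(module, node.capitalize())  # "abs" -> Abs
node_class().run_all()  # invokes Abs.abs_i32, Abs.abs_i8, ... discovered via dir()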
nodegen/node/abs.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Abs(RunAll):
    @staticmethod
    def abs_i32():
        x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
        y = abs(x)

        x = Tensor(Dtype.I32, x.shape, x.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "abs_i32"
        make_test([x], y, "input_0.abs()", name)

    @staticmethod
    def abs_i8():
        x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
        y = abs(x)

        x = Tensor(Dtype.I8, x.shape, x.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())

        name = "abs_i8"
        make_test([x], y, "input_0.abs()", name)

    @staticmethod
    def abs_fp8x23():
        x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int64), FixedImpl.FP8x23)
        y = abs(x)

        x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
        y = Tensor(Dtype.FP8x23, y.shape, y.flatten())

        name = "abs_fp8x23"
        make_test([x], y, "input_0.abs()", name)

    @staticmethod
    def abs_fp16x16():
        x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int64), FixedImpl.FP16x16)
        y = abs(x)

        x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
        y = Tensor(Dtype.FP16x16, y.shape, y.flatten())

        name = "abs_fp16x16"
        make_test([x], y, "input_0.abs()", name)
https://github.com/gizatechxyz/orion
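To make the pipeline concrete, here is a hedged reconstruction (refs trimmed to one import, assuming the nodegen package is importable) of the test file make_test writes for abs_i32, built from CairoTest.base_template.

from nodegen.file_manager import CairoTest

print("\n".join(CairoTest.base_template(
    name="abs_i32",
    arg_cnt=1,
    refs=["orion::utils::{assert_eq, assert_seq_eq}"],  # subset of the real refs
    func_sig="input_0.abs()",
)))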
nodegen/node/acos.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Acos(RunAll):
    @staticmethod
    def acos_fp8x23():
        x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
        y = np.arccos(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "acos_fp8x23"
        make_test([x], y, "input_0.acos()", name)

    @staticmethod
    def acos_fp16x16():
        x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
        y = np.arccos(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "acos_fp16x16"
        make_test([x], y, "input_0.acos()", name)
https://github.com/gizatechxyz/orion
nodegen/node/acosh.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Acosh(RunAll):
    @staticmethod
    def acosh_fp8x23():
        x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
        y = np.arccosh(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "acosh_fp8x23"
        make_test([x], y, "input_0.acosh()", name)

    @staticmethod
    def acosh_fp16x16():
        x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
        y = np.arccosh(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "acosh_fp16x16"
        make_test([x], y, "input_0.acosh()", name)
https://github.com/gizatechxyz/orion
nodegen/node/add.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Add(RunAll):
    @staticmethod
    def add_u32():
        def default():
            x = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
            z = x + y

            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())

            name = "add_u32"
            make_test([x, y], z, "input_0 + input_1", name)

        def broadcast():
            x = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
            z = x + y

            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())

            name = "add_u32_broadcast"
            make_test([x, y], z, "input_0 + input_1", name)

        default()
        broadcast()

    @staticmethod
    def add_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = x + y

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())

            name = "add_i32"
            make_test([x, y], z, "input_0 + input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
            z = x + y

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())

            name = "add_i32_broadcast"
            make_test([x, y], z, "input_0 + input_1", name)

        default()
        broadcast()

    @staticmethod
    def add_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = x + y

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())

            name = "add_i8"
            make_test([x, y], z, "input_0 + input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
            z = x + y

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())

            name = "add_i8_broadcast"
            make_test([x, y], z, "input_0 + input_1", name)

        default()
        broadcast()

    @staticmethod
    def add_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = x + y

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))

            name = "add_fp8x23"
            make_test([x, y], z, "input_0 + input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = x + y

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))

            name = "add_fp8x23_broadcast"
            make_test([x, y], z, "input_0 + input_1", name)

        default()
        broadcast()

    @staticmethod
    def add_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = x + y

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))

            name = "add_fp16x16"
            make_test([x, y], z, "input_0 + input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = x + y

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))

            name = "add_fp16x16_broadcast"
            make_test([x, y], z, "input_0 + input_1", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
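Why the *_broadcast variants pair shapes (3, 3, 3) and (1, 3, 1): NumPy stretches the size-1 axes of the second operand, so the reference sum keeps shape (3, 3, 3), and the generated Cairo test asserts the tensor `+` operator reproduces this.

import numpy as np

x = np.ones((3, 3, 3), dtype=np.uint32)
y = np.arange(3, dtype=np.uint32).reshape(1, 3, 1)
print((x + y).shape)  # (3, 3, 3)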
nodegen/node/and.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class And(RunAll):
    @staticmethod
    def and_bool():
        def default():
            x = (np.random.randn(3, 4) > 0).astype(bool)
            y = (np.random.randn(3, 4) > 0).astype(bool)
            z = np.logical_and(x, y)

            x = Tensor(Dtype.BOOL, x.shape, x.flatten())
            y = Tensor(Dtype.BOOL, y.shape, y.flatten())
            z = Tensor(Dtype.BOOL, z.shape, z.flatten())

            name = "and_bool"
            make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name)

        def broadcast():
            x = (np.random.randn(3, 4, 5) > 0).astype(bool)
            y = (np.random.randn(3, 4, 5) > 0).astype(bool)
            z = np.logical_and(x, y)

            x = Tensor(Dtype.BOOL, x.shape, x.flatten())
            y = Tensor(Dtype.BOOL, y.shape, y.flatten())
            z = Tensor(Dtype.BOOL, z.shape, z.flatten())

            name = "and_bool_broadcast"
            make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/argmax.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray:
    result = np.argmax(data, axis=axis)
    if keepdims == 1:
        result = np.expand_dims(result, axis)

    return result.astype(np.int64)


def argmax_use_numpy_select_last_index(
    data: np.ndarray, axis: int = 0, keepdims: int = True
) -> np.ndarray:
    data = np.flip(data, axis)
    result = np.argmax(data, axis=axis)
    result = data.shape[axis] - result - 1
    if keepdims:
        result = np.expand_dims(result, axis)

    return result.astype(np.int64)


class Argmax(RunAll):
    @staticmethod
    def no_keepdims():
        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
        axis = 1
        keepdims = 0
        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_no_keepdims"
        make_test(
            [x], y, "input_0.argmax(1, Option::Some(false), Option::None(()))", name)

    @staticmethod
    def keepdims():
        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
        axis = 1
        keepdims = 1
        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_keepdims"
        make_test(
            [x], y, "input_0.argmax(1, Option::Some(true), Option::None(()))", name)

    @staticmethod
    def default_axes_keepdims():
        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
        keepdims = 1
        result = argmax_use_numpy(data, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_default_axes_keepdims"
        make_test(
            [x], y, "input_0.argmax(0, Option::Some(true), Option::None(()))", name)

    @staticmethod
    def negative_axis_keepdims():
        data = np.array([[2, 1], [3, 10]], dtype=np.float32)
        axis = -1
        keepdims = 1
        result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_negative_axis_keepdims"
        make_test(
            [x], y, "input_0.argmax(-1, Option::Some(true), Option::None(()))", name)

    @staticmethod
    def no_keepdims_select_last_index():
        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
        axis = 1
        keepdims = 0
        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_no_keepdims_select_last_index"
        make_test(
            [x], y, "input_0.argmax(1, Option::Some(false), Option::Some(true))", name)

    @staticmethod
    def keepdims_select_last_index():
        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
        axis = 1
        keepdims = 1
        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_keepdims_select_last_index"
        make_test(
            [x], y, "input_0.argmax(1, Option::Some(true), Option::Some(true))", name)

    @staticmethod
    def default_axes_keepdims_select_last_index():
        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
        keepdims = 1
        result = argmax_use_numpy_select_last_index(data, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_default_axes_keepdims_select_last_index"
        make_test(
            [x], y, "input_0.argmax(0, Option::Some(true), Option::Some(true))", name)

    @staticmethod
    def negative_axis_keepdims_select_last_index():
        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
        axis = -1
        keepdims = 1
        result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)

        x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
        y = Tensor(Dtype.I32, result.shape, result.flatten())

        name = "argmax_negative_axis_keepdims_select_last_index"
        make_test(
            [x], y, "input_0.argmax(-1, Option::Some(true), Option::Some(true))", name)
https://github.com/gizatechxyz/orion
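A sanity check of the flip trick in argmax_use_numpy_select_last_index: flipping the axis, taking argmax, then re-indexing returns the LAST position of a tied maximum, which is what the select_last_index variants assert.

import numpy as np

data = np.array([[2, 2], [3, 10]], dtype=np.float32)
flipped = np.flip(data, 1)
result = data.shape[1] - np.argmax(flipped, axis=1) - 1
print(result)  # [1 1] -- plain np.argmax(data, axis=1) would give [0 1]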
nodegen/node/argmin.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def argmin_use_numpy(
    data: np.ndarray, axis: int = 0, keepdims: int = 1, dtype=np.int64
) -> np.ndarray:
    result = np.argmin(data, axis=axis)
    if keepdims == 1:
        result = np.expand_dims(result, axis)

    return result.astype(dtype)


def argmin_use_numpy_select_last_index(
    data: np.ndarray, axis: int = 0, keepdims: int = True, dtype=np.int64
) -> np.ndarray:
    data = np.flip(data, axis)
    result = np.argmin(data, axis=axis)
    result = data.shape[axis] - result - 1
    if keepdims:
        result = np.expand_dims(result, axis)

    return result.astype(dtype)


class Argmin(RunAll):
    @staticmethod
    def argmin_u32():
        def argmin_1D():
            def default_params():
                x = np.random.randint(0, 255, (3)).astype(np.uint32)
                y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_1D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(0, 255, (3)).astype(np.uint32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_1D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(0, 255, (3)).astype(np.uint32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_1D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_1D()

        def argmin_2D():
            def default_params():
                x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_2D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_2D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_2D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_2D()

        def argmin_3D():
            def default_params():
                x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_3D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_3D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_u32_3D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_3D()

    @staticmethod
    def argmin_i32():
        def argmin_1D():
            def default_params():
                x = np.random.randint(-127, 127, (3)).astype(np.int32)
                y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_1D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (3)).astype(np.int32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_1D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(0, 255, (3)).astype(np.int32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_1D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_1D()

        def argmin_2D():
            def default_params():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_2D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_2D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.int32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_2D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_2D()

        def argmin_3D():
            def default_params():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_3D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_3D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i32_3D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_3D()

    @staticmethod
    def argmin_i8():
        def argmin_1D():
            def default_params():
                x = np.random.randint(-127, 127, (3)).astype(np.int8)
                y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_1D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (3)).astype(np.int8)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_1D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(0, 255, (3)).astype(np.int8)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_1D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_1D()

        def argmin_2D():
            def default_params():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_2D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_2D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
                y = argmin_use_numpy_select_last_index(x, dtype=np.int8)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_2D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_2D()

        def argmin_3D():
            def default_params():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_3D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_3D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_i8_3D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_3D()

    @staticmethod
    def argmin_fp16x16():
        def argmin_1D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (3)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_1D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (3)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_1D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(0, 255, (3)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_1D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_1D()

        def argmin_2D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_2D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_2D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy_select_last_index(x, dtype=np.int8)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_2D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_2D()

        def argmin_3D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_3D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_3D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP16x16)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp16x16_3D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_3D()

    @staticmethod
    def argmin_fp8x23():
        def argmin_1D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (3)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_1D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (3)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_1D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(0, 255, (3)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32).reshape((1))

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_1D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_1D()

        def argmin_2D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_2D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_2D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy_select_last_index(x, dtype=np.int8)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_2D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_2D()

        def argmin_3D():
            def default_params():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, dtype=np.uint32)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_3D_default"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)

            def keepdims_false():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy(x, keepdims=0, dtype=np.uint32)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_3D_keepdims_false"
                make_test(
                    [x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)

            def last_index():
                x = to_fp(np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8), FixedImpl.FP8x23)
                y = argmin_use_numpy_select_last_index(x, dtype=np.uint32)

                x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "argmin_fp8x23_3D_last_index"
                make_test(
                    [x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)

            default_params()
            keepdims_false()
            last_index()
        argmin_3D()
https://github.com/gizatechxyz/orion
nodegen/node/array_feature_extractor.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Array_feature_extractor(RunAll):
    @staticmethod
    def array_feature_extractor_3D():
        def array_feature_extractor_i32():
            x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.int32)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())

            name = "array_feature_extractor_3D_i32"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp8x23():
            x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))

            name = "array_feature_extractor_3D_fp8x23"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp16x16():
            x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))

            name = "array_feature_extractor_3D_fp16x16"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        array_feature_extractor_i32()
        array_feature_extractor_fp8x23()
        array_feature_extractor_fp16x16()

    @staticmethod
    def array_feature_extractor_2D():
        def array_feature_extractor_i32():
            x = np.random.randint(-3, 3, (3, 4)).astype(np.int32)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())

            name = "array_feature_extractor_2D_i32"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp8x23():
            x = np.random.randint(-3, 3, (3, 4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))

            name = "array_feature_extractor_2D_fp8x23"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp16x16():
            x = np.random.randint(-3, 3, (3, 4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))

            name = "array_feature_extractor_2D_fp16x16"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        array_feature_extractor_i32()
        array_feature_extractor_fp8x23()
        array_feature_extractor_fp16x16()

    @staticmethod
    def array_feature_extractor_1D():
        def array_feature_extractor_i32():
            x = np.random.randint(-3, 3, (4)).astype(np.int32)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())

            name = "array_feature_extractor_1D_i32"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp8x23():
            x = np.random.randint(-3, 3, (4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))

            name = "array_feature_extractor_1D_fp8x23"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        def array_feature_extractor_fp16x16():
            x = np.random.randint(-3, 3, (4)).astype(np.float64)
            y = np.array([1, 3]).astype(np.uint32)
            z = (x[..., y])

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))

            name = "array_feature_extractor_1D_fp16x16"
            make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)

        array_feature_extractor_i32()
        array_feature_extractor_fp8x23()
        array_feature_extractor_fp16x16()
https://github.com/gizatechxyz/orion
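The reference outputs above rely on numpy's ellipsis indexing, x[..., y], which gathers the indices listed in y along the last axis whatever the rank of x. A minimal sketch of that semantics in plain numpy, independent of the nodegen helpers:

import numpy as np

x = np.arange(24).reshape(2, 3, 4)   # works for any rank; the gather is on the last axis
y = np.array([1, 3])                 # feature indices to extract
z = x[..., y]                        # equivalent to np.take(x, y, axis=-1)
assert z.shape == (2, 3, 2)
assert np.array_equal(z, np.take(x, y, axis=-1))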
nodegen/node/asin.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Asin(RunAll): @staticmethod def asin_fp8x23(): x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64) y = np.arcsin(x) x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "asin_fp8x23" make_test([x], y, "input_0.asin()", name) @staticmethod def asin_fp16x16(): x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64) y = np.arcsin(x) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "asin_fp16x16" make_test([x], y, "input_0.asin()", name)
https://github.com/gizatechxyz/orion
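These generators convert their float references into Orion's fixed-point wire format with to_fp before emitting the Cairo test. The helper's implementation is not part of this dump; a hypothetical stand-in consistent with how FP8x23 and FP16x16 are used throughout these files would scale by 2**23 or 2**16 and round (an assumption for illustration only, not the actual helpers.to_fp):

import numpy as np

def to_fp_sketch(arr, fractional_bits):
    # Hypothetical stand-in for helpers.to_fp: scale floats into fixed-point
    # integers carrying the given number of fractional bits.
    return np.round(arr * 2**fractional_bits).astype(np.int64)

x = np.array([-1.0, 0.5, 1.0])
print(to_fp_sketch(x, 23))   # FP8x23-style values: [-8388608  4194304  8388608]
print(to_fp_sketch(x, 16))   # FP16x16-style values: [-65536  32768  65536]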
nodegen/node/asinh.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Asinh(RunAll): @staticmethod def asinh_fp8x23(): x = np.random.uniform(1, 5, (2, 2)).astype(np.float64) y = np.arcsinh(x) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "asinh_fp8x23" make_test([x], y, "input_0.asinh()", name) @staticmethod def asinh_fp16x16(): x = np.random.uniform(1, 5, (2, 2)).astype(np.float64) y = np.arcsinh(x) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "asinh_fp16x16" make_test([x], y, "input_0.asinh()", name)
https://github.com/gizatechxyz/orion
nodegen/node/atan.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Atan(RunAll): @staticmethod def atan_fp8x23(): x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64) y = np.arctan(x) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "atan_fp8x23" make_test([x], y, "input_0.atan()", name) @staticmethod def atan_fp16x16(): x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64) y = np.arctan(x) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "atan_fp16x16" make_test([x], y, "input_0.atan()", name)
https://github.com/gizatechxyz/orion
nodegen/node/binarizer.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl class Binarizer(RunAll): @staticmethod def binarizer_fp8x23(): x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64) threshold = np.float64(1) y = (x > threshold).astype(np.float64) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "binarizer_fp8x23" make_node([x], [y], name) make_test([x], y, "TensorTrait::binarizer(@input_0, Option::Some(FixedTrait::new(8388608, false)))", name) @staticmethod def binarizer_fp16x16(): x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64) threshold = np.float64(1) y = (x > threshold).astype(np.float64) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "binarizer_fp16x16" make_node([x], [y], name) make_test([x], y, "TensorTrait::binarizer(@input_0, Option::Some(FixedTrait::new(65536, false)))", name)
https://github.com/gizatechxyz/orion
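The magnitudes hard-coded in the two Cairo signatures are the fixed-point encodings of the numpy threshold of 1.0: 8388608 = 2**23 in FP8x23 and 65536 = 2**16 in FP16x16. The reference itself is a strict greater-than, so values equal to the threshold map to 0:

import numpy as np

assert 8388608 == 1 * 2**23   # FixedTrait::new(8388608, false) encodes 1.0 in FP8x23
assert 65536 == 1 * 2**16     # FixedTrait::new(65536, false) encodes 1.0 in FP16x16

x = np.array([-2.0, 0.5, 1.0, 2.5])
print((x > 1.0).astype(np.float64))   # [0. 0. 0. 1.] -- strictly greater than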
nodegen/node/blackman_window.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement def blackman_window(size, output_datatype=None, periodic=None) -> np.ndarray: # type: ignore if periodic == 1: N_1 = size else: N_1 = size - 1 ni = np.arange(size, dtype=output_datatype) alpha = 0.42 beta = 0.08 y = np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 2)) / N_1).astype(output_datatype) * (-0.5) y += np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 4)) / N_1) * beta y += alpha return y.astype(output_datatype) class Blackman_window(RunAll): @staticmethod # We test here with fp8x23 implementation. def fp8x23(): args = [3] # x = np.float64(4) args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) y = blackman_window(*args, np.float64) # Convert the floats values in `y` to fixed points with `to_fp` method: y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) # Define the name of the generated folder. name = "blackman_window_fp8x23" # Invoke `make_test` method to generate corresponding Cairo tests: make_test( [], # List of input tensors. y, # The expected output result. f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. name # The name of the generated folder. ) @staticmethod # We test here with fp16x16 implementation. def fp16x16(): print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)) args = [3] # x = np.float64(4) args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) y = blackman_window(*args, np.float16, 1) # Convert the floats values in `y` to fixed points with `to_fp` method: y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) # Define the name of the generated folder. name = "blackman_window_fp16x16" # Invoke `make_test` method to generate corresponding Cairo tests: make_test( [], # List of input tensors. y, # The expected output result. f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(1))", # The code signature. name # The name of the generated folder. ) # @staticmethod # # We test here with i8 implementation. # def i8(): # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8)) # args = [5] # # x = np.float64(4) # args_str = get_data_statement(np.array(args).flatten(), Dtype.I8) # y = blackman_window(*args, np.int8) # print(y) # # Convert the floats values in `y` to fixed points with `to_fp` method: # y = Tensor(Dtype.I8, y.shape, y.flatten()) # # Define the name of the generated folder. # name = "blackman_window_i8" # # Invoke `make_test` method to generate corresponding Cairo tests: # make_test( # [], # List of input tensors. # y, # The expected output result. # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(1))", # The code signature. # name # The name of the generated folder. # ) # @staticmethod # # We test here with i32 implementation. # def i32(): # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32)) # args = [4] # # x = np.float64(4) # args_str = get_data_statement(np.array(args).flatten(), Dtype.I32) # y = blackman_window(*args, np.int32) # print(y) # # Convert the floats values in `y` to fixed points with `to_fp` method: # y = Tensor(Dtype.I32, y.shape, y.flatten()) # # Define the name of the generated folder. 
# name = "blackman_window_i32" # # Invoke `make_test` method to generate corresponding Cairo tests: # make_test( # [], # List of input tensors. # y, # The expected output result. # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. # name # The name of the generated folder. # ) # @staticmethod # # We test here with u32 implementation. # def u32(): # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32)) # args = [4] # # x = np.float64(4) # args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) # y = blackman_window(*args, np.uint32) # print(y) # # Convert the floats values in `y` to fixed points with `to_fp` method: # y = Tensor(Dtype.U32, y.shape, y.flatten()) # # Define the name of the generated folder. # name = "blackman_window_u32" # # Invoke `make_test` method to generate corresponding Cairo tests: # make_test( # [], # List of input tensors. # y, # The expected output result. # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. # name # The name of the generated folder. # )
https://github.com/gizatechxyz/orion
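For the symmetric case exercised by the fp8x23 test (periodic unset), the generator above agrees with the textbook Blackman window, w[n] = 0.42 - 0.5*cos(2*pi*n/(N-1)) + 0.08*cos(4*pi*n/(N-1)); it only accumulates the terms in a different order. A quick check against numpy's built-in:

import numpy as np

size = 5
n = np.arange(size)
w = 0.42 - 0.5 * np.cos(2 * np.pi * n / (size - 1)) + 0.08 * np.cos(4 * np.pi * n / (size - 1))
assert np.allclose(w, np.blackman(size))   # numpy's symmetric Blackman window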
nodegen/node/ceil.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Ceil(RunAll): @staticmethod def ceil_fp8x23(): x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64) y = np.ceil(x) x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "ceil_fp8x23" make_test([x], y, "input_0.ceil()", name) @staticmethod def ceil_fp16x16(): x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64) y = np.ceil(x) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "ceil_fp16x16" make_test([x], y, "input_0.ceil()", name)
https://github.com/gizatechxyz/orion
nodegen/node/clip.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Clip(RunAll): @staticmethod def clip_u32(): def clip_2D(): x = np.random.randint(0, 255, (2, 4)).astype(np.uint32) y = np.clip(x, np.uint32(10), np.uint32(20)) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "clip_u32_2d" make_test( [x], y, "input_0.clip(Option::Some(10_u32), Option::Some(20_u32))", name) def clip_3D(): x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32) y = np.clip(x, np.uint32(10), np.uint32(20)) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "clip_u32_3d" make_test( [x], y, "input_0.clip(Option::Some(10_u32), Option::Some(20_u32))", name) clip_2D() clip_3D() @staticmethod def clip_i32(): def clip_2D(): x = np.random.randint(-127, 127, (2, 4)).astype(np.int32) y = np.clip(x, np.int32(-10), np.int32(20)) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "clip_i32_2d" make_test( [x], y, "input_0.clip(Option::Some(-10_i32), Option::Some(20_i32))", name) def clip_3D(): x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32) y = np.clip(x, np.int32(-10), np.int32(20)) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "clip_i32_3d" make_test( [x], y, "input_0.clip(Option::Some(-10_i32), Option::Some(20_i32))", name) clip_2D() clip_3D() @staticmethod def clip_i8(): def clip_2D(): x = np.random.randint(-127, 127, (2, 4)).astype(np.int8) y = np.clip(x, np.int8(-10), np.int8(20)) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "clip_i8_2d" make_test( [x], y, "input_0.clip(Option::Some(-10_i8), Option::Some(20_i8))", name) def clip_3D(): x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8) y = np.clip(x, np.int8(-10), np.int8(20)) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "clip_i8_3d" make_test( [x], y, "input_0.clip(Option::Some(-10_i8), Option::Some(20_i8))", name) clip_2D() clip_3D() @staticmethod def clip_fp8x23(): def clip_2D(): x = to_fp(np.random.randint(-127, 127, (2, 4) ).astype(np.int64), FixedImpl.FP8x23) y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP8x23), to_fp(np.int64(20), FixedImpl.FP8x23)) x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "clip_fp8x23_2d" make_test( [x], y, "input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false }))", name) def clip_3D(): x = to_fp(np.random.randint(-127, 127, (20, 10, 5) ).astype(np.int64), FixedImpl.FP8x23) y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP8x23), to_fp(np.int64(20), FixedImpl.FP8x23)) x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "clip_fp8x23_3d" make_test( [x], y, "input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false }))", name) clip_2D() clip_3D() @staticmethod def clip_fp16x16(): def clip_2D(): x = to_fp(np.random.randint(-127, 127, (2, 4) ).astype(np.int64), FixedImpl.FP16x16) y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP16x16), to_fp(np.int64(20), FixedImpl.FP16x16)) x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) name = "clip_fp16x16_2d" make_test( [x], y, "input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: 
true }), Option::Some(FP16x16 { mag: 1310720, sign: false }))", name) def clip_3D(): x = to_fp(np.random.randint(-127, 127, (20, 10, 5) ).astype(np.int64), FixedImpl.FP16x16) y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP16x16), to_fp(np.int64(20), FixedImpl.FP16x16)) x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) name = "clip_fp16x16_3d" make_test( [x], y, "input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false }))", name) clip_2D() clip_3D()
https://github.com/gizatechxyz/orion
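The fixed-point struct literals in the fp8x23/fp16x16 signatures encode the same clip bounds used on the numpy side: mag = |value| * 2**fractional_bits, with sign: true marking a negative value. Checking the four constants:

assert 83886080 == 10 * 2**23    # FP8x23 { mag: 83886080, sign: true }   -> -10
assert 167772160 == 20 * 2**23   # FP8x23 { mag: 167772160, sign: false } ->  20
assert 655360 == 10 * 2**16      # FP16x16 { mag: 655360, sign: true }    -> -10
assert 1310720 == 20 * 2**16     # FP16x16 { mag: 1310720, sign: false }  ->  20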
nodegen/node/col2im.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait def col2im(data, image_shape, block_shape, dilations=None, pads=None, strides=None): # type: ignore if dilations is None: dilations = [1 for s in image_shape] if pads is None: pads = [0 for s in image_shape] * 2 if strides is None: strides = [1 for s in image_shape] bl = np.prod(block_shape) C = data.shape[1] // bl data = data.reshape(data.shape[:1] + (C,) + (bl,) + data.shape[2:]) ks = tuple(block_shape) res = None for n in range(data.shape[0]): for c in range(data.shape[1]): out = col2im_naive_implementation( data[n, c, ...], image_shape, ks, dilations, pads, strides ) if res is None: new_shape = data.shape[:2] + out.shape res = np.empty(new_shape, dtype=data.dtype) res[n, c, ...] = out return (res,) # type: ignore def _get_indices(i, shape): res = np.empty((len(shape),), dtype=np.int64) k = len(shape) - 1 while k > 0: m = i % shape[k] res[k] = m i -= m i /= shape[k] k -= 1 res[0] = i return res def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore n_input_plane = X.shape[0] kernel_size = np.prod(kernel_shape) if n_input_plane % kernel_size != 0: raise ValueError( f"Expected size of input's dimension 1 to be divisible by the " f"product of kernel_size={kernel_size}, " f"but got input.size(1)={n_input_plane} " f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}." ) input_length = X.shape[1] n_dims = len(output_shape) n_blocks = [] for i in range(n_dims): n_block = ( output_shape[i] + pads[i, :].sum() - dilations[i] * (kernel_shape[i] - 1) - 1 ) // strides[i] + 1 n_blocks.append(n_block) block_size = np.prod(n_blocks) if input_length != block_size: raise ValueError( f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, " f"output_shape={output_shape}, kernel_shape={kernel_shape}, " f"dilations={dilations}, pads={pads}, strides={strides}, " f"expected size of input's dimension 2 to match the calculated number of " f"sliding blocks {n_blocks} = {block_size}, " f"but got input.size(2)={input_length}.", ) def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides): # type: ignore n_dims = len(pads) // 2 new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) _col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides) data_col = data data_im = np.zeros(image_shape, dtype=data.dtype) dim_col = [] for i in range(n_dims): col = ( image_shape[i] + new_pads[i, :].sum() - (dilations[i] * (kernel_shape[i] - 1) + 1) ) // strides[i] + 1 dim_col.append(col) kernel_size = np.prod(kernel_shape) col_size = np.prod(dim_col) for c_col in range(kernel_size): offset = _get_indices(c_col, kernel_shape) for col in range(col_size): ind_col = _get_indices(col, dim_col) ind_im = [] for i in range(n_dims): ind = ( ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i] ) ind_im.append(ind) if not _is_out(ind_im, data_im.shape): data_im[tuple(ind_im)] += data_col[c_col, col] return data_im def _is_out(ind, shape): for i, s in zip(ind, shape): if i < 0: return True if i >= s: return True return False class Col2im(RunAll): @staticmethod def export_col2im() -> None: x = np.array( [ [ [1.0, 6.0, 11.0, 16.0, 21.0], # (1, 5, 5) [2.0, 7.0, 12.0, 17.0, 22.0], [3.0, 8.0, 13.0, 18.0, 23.0], [4.0, 9.0, 14.0, 19.0, 24.0], [5.0, 0.0, 15.0, 20.0, 25.0], ] ] ).astype(np.float32) image_shape = np.array([5, 5]).astype(np.int64) block_shape = np.array([1, 
5]).astype(np.int64) y = col2im(x,image_shape,block_shape) y = np.array(y[0]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "col2im" func_sig = "NNTrait::col2im(" func_sig += "@input_0," func_sig += "array![5, 5].span()," func_sig += "array![1, 5].span()," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x], y, func_sig, name, Trait.NN) @staticmethod def export_col2im_strides() -> None: x = np.array( [ [ [0.0, 0.0, 0.0, 0.0], # (1, 9, 4) [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], ] ] ).astype(np.float32) image_shape = np.array([5, 5]).astype(np.int64) block_shape = np.array([3, 3]).astype(np.int64) y = col2im(x,image_shape,block_shape,strides=[2, 2]) y = np.array(y[0]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "col2im_strides" func_sig = "NNTrait::col2im(" func_sig += "@input_0," func_sig += "array![5, 5].span()," func_sig += "array![3, 3].span()," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![2, 2].span()))" make_test( [x], y, func_sig, name, Trait.NN) @staticmethod def export_col2im_pads() -> None: x = np.array( [ [ [ 1.0, 6.0, 11.0, 16.0, 21.0, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, ], # (1, 5, 15) [ 2.0, 7.0, 12.0, 17.0, 22.0, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, ], [ 3.0, 8.0, 13.0, 18.0, 23.0, 28, 33, 38, 43, 48, 53, 58, 63, 68, 73, ], [ 4.0, 9.0, 14.0, 19.0, 24.0, 29, 34, 39, 44, 49, 54, 59, 64, 69, 74, ], [ 5.0, 10.0, 15.0, 20.0, 25.0, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, ], ] ] ).astype(np.float32) image_shape = np.array([5, 5]).astype(np.int64) block_shape = np.array([1, 5]).astype(np.int64) y = col2im(x,image_shape,block_shape,pads=[0, 1, 0, 1]) y = np.array(y[0]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "col2im_pads" func_sig = "NNTrait::col2im(" func_sig += "@input_0," func_sig += "array![5, 5].span()," func_sig += "array![1, 5].span()," func_sig += "Option::None," func_sig += "Option::Some(array![0, 1, 0, 1].span())," func_sig += "Option::None)" make_test( [x], y, func_sig, name, Trait.NN) @staticmethod def export_col2im_dilations() -> None: x = np.array( [ [ [1.0, 5.0, 9.0, 13.0, 17], # (1, 4, 5) [2.0, 6.0, 10.0, 14.0, 18], [3.0, 7.0, 11.0, 15.0, 19], [4.0, 8.0, 12.0, 16.0, 20], ] ] ).astype(np.float32) image_shape = np.array([6, 6]).astype(np.int64) block_shape = np.array([2, 2]).astype(np.int64) y = col2im(x,image_shape,block_shape, dilations=[1, 5]) y = np.array(y[0]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "col2im_dilations" func_sig = "NNTrait::col2im(" func_sig += "@input_0," func_sig += "array![6, 6].span()," func_sig += "array![2, 2].span()," func_sig += "Option::Some(array![1, 5].span())," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x], y, func_sig, name, Trait.NN) @staticmethod def export_col2im_5D() -> None: x = np.array( [ [ [1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56], # (1, 10, 12) [2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57], [3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 
53, 58], [4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59], [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], [61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116], [62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117], [63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118], [64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119], [65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120], ] ] ).astype(np.float32) image_shape = np.array([3, 4, 5]).astype(np.int64) block_shape = np.array([1, 1, 5]).astype(np.int64) y = col2im(x,image_shape,block_shape) y = np.array(y[0]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "col2im_5D" func_sig = "NNTrait::col2im(" func_sig += "@input_0," func_sig += "array![3, 4, 5].span()," func_sig += "array![1, 1, 5].span()," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x], y, func_sig, name, Trait.NN)
https://github.com/gizatechxyz/orion
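A tiny usage sketch of the col2im reference defined above: one 1x4 block written back onto a 1x4 image with unit strides and no padding, so each kernel row of data holds the single value that lands at its offset. With overlapping blocks the scattered values would be summed instead.

import numpy as np

data = np.arange(1., 5.).reshape(1, 4, 1)   # (N=1, C*prod(block_shape)=4, n_blocks=1)
(img,) = col2im(data, image_shape=[1, 4], block_shape=[1, 4])
print(img.shape)    # (1, 1, 1, 4) -- batch, channel, then the reassembled image
print(img.ravel())  # [1. 2. 3. 4.]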
nodegen/node/compress.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait class Compress(RunAll): @staticmethod def compress_fp16x16(): def compress_3D(): def default(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=0) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "compress_fp16x16_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))", name= name) def axis1(): x1 = np.arange(0,180).reshape(3,4,3,5).astype(np.int64) x2 = np.array([1, 1, 1, 0]).astype(np.int64) y = x1.compress(x2, axis=1) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "compress_fp16x16_3d_axis1" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))", name= name) def axis2(): x1 = np.arange(0,48).reshape(4,3,4).astype(np.int64) x2 = np.array([1, 0, 1, 1]).astype(np.int64) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "compress_fp16x16_3d_axis2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) def axis3(): x1 = np.arange(0,96).reshape(4,3,4, 2).astype(np.int64) x2 = np.array([1, 0]).astype(np.int64) y = x1.compress(x2, axis=3) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "compress_fp16x16_3d_axis3" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))", name= name) def noaxis(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) x2 = np.array([1, 0, 1, 0, 1, 1, 1, 1, 1]).astype(np.int64) y = x1.compress(x2) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "compress_fp16x16_3d_noaxis" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::None(()))", name= name) default() axis1() axis2() axis3() noaxis() compress_3D() @staticmethod def compress_fp8x23(): def compress_3D(): def default(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=0) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "compress_fp8x23_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))", name= name) def axis1(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=1) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, 
x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "compress_fp8x23_3d_axis1" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))", name= name) def axis2(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "compress_fp8x23_3d_axis2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) default() axis1() axis2() compress_3D() @staticmethod def compress_i8(): def compress_3D(): def default(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) x2 = np.array([0, 1, 1]).astype(np.uint8) y = x1.compress(x2, axis=0) x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "compress_i8_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))", name= name) def axis1(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) x2 = np.array([0, 1, 1]).astype(np.uint8) y = x1.compress(x2, axis=1) x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "compress_i8_3d_axis1" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))", name= name) def axis2(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) x2 = np.array([0, 1, 1]).astype(np.uint8) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "compress_i8_3d_axis2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) default() axis1() axis2() compress_3D() @staticmethod def compress_i32(): def compress_3D(): def default(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) x2 = np.array([0, 1, 1]).astype(np.int32) y = x1.compress(x2, axis=0) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "compress_i32_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))", name= name) def axis1(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) x2 = np.array([0, 1, 1]).astype(np.int32) y = x1.compress(x2, axis=1) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "compress_i32_3d_axis1" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))", name= name) def axis2(): x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) x2 = np.array([0, 1, 1]).astype(np.int32) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "compress_i32_3d_axis2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) default() axis1() axis2() compress_3D() 
@staticmethod def compress_u32(): def compress_3D(): def default(): x1 = np.arange(0,48).reshape(4,4,3).astype(np.uint32) x2 = np.array([1, 1]).astype(np.uint32) y = x1.compress(x2, axis=0) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "compress_u32_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))", name= name) def axis1(): x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=1) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "compress_u32_3d_axis1" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))", name= name) def axis2(): x1 = np.arange(0,48).reshape(3,4,4).astype(np.uint32) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "compress_u32_3d_axis2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) def axis2_2(): x1 = np.arange(0,60).reshape(3,4,5).astype(np.uint32) x2 = np.array([0, 1, 1]).astype(np.uint32) y = x1.compress(x2, axis=2) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "compress_u32_3d_axis2_2" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))", name= name) def axis3(): x1 = np.arange(0,270).reshape(3,3,5,6).astype(np.uint32) x2 = np.array([0, 1, 1,1,0,1]).astype(np.uint32) y = x1.compress(x2, axis=3) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "compress_u32_3d_axis3" make_test( inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))", name= name) default() axis1() axis2() axis2_2() axis3() compress_3D()
https://github.com/gizatechxyz/orion
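Every case above reduces to numpy's ndarray.compress, which keeps only the slices whose condition entry is truthy along the given axis, or works on the flattened array when no axis is passed (a condition shorter than the axis simply stops early):

import numpy as np

x = np.arange(27).reshape(3, 3, 3)
print(x.compress([0, 1, 1], axis=0).shape)   # (2, 3, 3): keeps slices 1 and 2
print(x.compress([0, 1, 1], axis=2).shape)   # (3, 3, 2): keeps columns 1 and 2
print(x.compress([1, 0, 1]))                 # [0 2]: flattened, stops after 3 entries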
nodegen/node/concat.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait # 687 class Concat(RunAll): @staticmethod def concat_u32(): def concat_1D(): x1 = np.arange(0,3).astype(np.uint32) x2 = np.arange(3,6).astype(np.uint32) y = np.concatenate((x1, x2)) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_1d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_2D(): x1 = np.arange(0,4).astype(np.uint32).reshape(2,2) x2 = np.arange(4,8).astype(np.uint32).reshape(2,2) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_2d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_3D(): def default(): x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def axis_1(): x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=1) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_3d_axis_1" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)", name= name, trait= Trait.TENSOR) def axis_2(): x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=2) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_3d_axis_2" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)", name= name, trait= Trait.TENSOR) def three_tensors_axis_1(): x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3) x3 = np.arange(54,81).astype(np.uint32).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=1) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "concat_u32_3d_three_tensors_axis_1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)", name= name, trait= Trait.TENSOR) def three_tensors_axis_2(): x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3) x3 = np.arange(54,81).astype(np.uint32).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=2) x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name 
= "concat_u32_3d_three_tensors_axis_2" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)", name= name, trait= Trait.TENSOR) default() axis_1() axis_2() three_tensors_axis_1() three_tensors_axis_2() concat_1D() concat_2D() concat_3D() @staticmethod def concat_i32(): def concat_1D(): x1 = np.arange(0,3).astype(np.int32) x2 = np.arange(3,6).astype(np.int32) y = np.concatenate((x1, x2)) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_1d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR.TENSOR) def concat_2D(): x1 = np.arange(0,4).astype(np.int32).reshape(2,2) x2 = np.arange(4,8).astype(np.int32).reshape(2,2) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_2d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_3D(): def default(): x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def axis_1(): x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=1) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_3d_axis_1" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)", name= name, trait= Trait.TENSOR) def axis_2(): x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3) y = np.concatenate((x1, x2), axis=2) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_3d_axis_2" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)", name= name, trait= Trait.TENSOR) def three_tensors_axis_1(): x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int32).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=1) x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.I32, x3.shape, x3.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_3d_three_tensors_axis_1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)", name= name, trait= Trait.TENSOR) def three_tensors_axis_2(): x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int32).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=2) x1 = Tensor(Dtype.I32, x1.shape, 
x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.I32, x3.shape, x3.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "concat_i32_3d_three_tensors_axis_2" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)", name= name, trait= Trait.TENSOR) default() axis_1() axis_2() three_tensors_axis_1() three_tensors_axis_2() concat_1D() concat_2D() concat_3D() @staticmethod def concat_i8(): def concat_1D(): x1 = np.arange(0,3).astype(np.int8) x2 = np.arange(3,6).astype(np.int8) y = np.concatenate((x1, x2)) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_1d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_2D(): x1 = np.arange(0,4).astype(np.int8).reshape(2,2) x2 = np.arange(4,8).astype(np.int8).reshape(2,2) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_2d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_3D(): def default(): x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def axis_1(): x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3) y = np.concatenate((x1, x2), axis=1) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_3d_axis_1" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)", name= name, trait= Trait.TENSOR) def axis_2(): x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3) y = np.concatenate((x1, x2), axis=2) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_3d_axis_2" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)", name= name, trait= Trait.TENSOR) def three_tensors_axis_1(): x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int8).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=1) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, x3.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_3d_three_tensors_axis_1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)", name= name, trait= Trait.TENSOR) def three_tensors_axis_2(): x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int8).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=2) x1 = Tensor(Dtype.FP8x23, x1.shape, x1.flatten()) x2 = Tensor(Dtype.FP8x23, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, x3.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) name = "concat_i8_3d_three_tensors_axis_2" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)", name= name, trait= Trait.TENSOR) default() axis_1() axis_2() three_tensors_axis_1() three_tensors_axis_2() concat_1D() concat_2D() concat_3D() @staticmethod def concat_fp8x23(): def concat_1D(): x1 = np.arange(0,3).astype(np.int64) x2 = np.arange(3,6).astype(np.int64) y = np.concatenate((x1, x2)) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_1d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_2D(): x1 = np.arange(0,4).astype(np.int64).reshape(2,2) x2 = np.arange(4,8).astype(np.int64).reshape(2,2) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_2d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_3D(): def default(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape,to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def axis_1(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=1) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_3d_axis_1" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)", name= name, trait= Trait.TENSOR) def axis_2(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=2) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_3d_axis_2" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)", name= name, trait= Trait.TENSOR) def three_tensors_axis_1(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 =
np.arange(27,54).astype(np.int64).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=1) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) x3 = Tensor(Dtype.FP8x23, x3.shape,to_fp( x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_3d_three_tensors_axis_1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)", name= name, trait= Trait.TENSOR) def three_tensors_axis_2(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=2) x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp( x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp( x2.flatten(), FixedImpl.FP8x23)) x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp( x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "concat_fp8x23_3d_three_tensors_axis_2" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)", name= name, trait= Trait.TENSOR) default() axis_1() axis_2() three_tensors_axis_1() three_tensors_axis_2() concat_1D() concat_2D() concat_3D() @staticmethod def concat_fp16x16(): def concat_1D(): x1 = np.arange(0,3).astype(np.int64) x2 = np.arange(3,6).astype(np.int64) y = np.concatenate((x1, x2)) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_1d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_2D(): x1 = np.arange(0,4).astype(np.int64).reshape(2,2) x2 = np.arange(4,8).astype(np.int64).reshape(2,2) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_2d" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def concat_3D(): def default(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=0) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_3d_default" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)", name= name, trait= Trait.TENSOR) def axis_1(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=1) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape
,to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_3d_axis_1" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)", name= name, trait= Trait.TENSOR) def axis_2(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2), axis=2) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_3d_axis_2" make_test( inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)", name= name, trait= Trait.TENSOR) def three_tensors_axis_1(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=1) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp( x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_3d_three_tensors_axis_1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)", name= name, trait= Trait.TENSOR) def three_tensors_axis_2(): x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3) x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3) x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3) y = np.concatenate((x1, x2, x3), axis=2) x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp( x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp( x2.flatten(), FixedImpl.FP16x16)) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp( x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape,to_fp( y.flatten(), FixedImpl.FP16x16)) name = "concat_fp16x16_3d_three_tensors_axis_2" make_test( inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)", name= name, trait= Trait.TENSOR) default() axis_1() axis_2() three_tensors_axis_1() three_tensors_axis_2() concat_1D() concat_2D() concat_3D()
https://github.com/gizatechxyz/orion
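The 3D cases above differ only in the concatenation axis: the output grows along that axis by the sum of the inputs' extents there, and all other dimensions must match. In shapes:

import numpy as np

a = np.zeros((3, 3, 3)); b = np.ones((3, 3, 3))
print(np.concatenate((a, b), axis=0).shape)      # (6, 3, 3)
print(np.concatenate((a, b), axis=1).shape)      # (3, 6, 3)
print(np.concatenate((a, b, a), axis=2).shape)   # (3, 3, 9)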
nodegen/node/concat_from_sequence.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait class Concat_from_sequence(RunAll): @staticmethod def concat_from_sequence_u32(): def new_axis_zero(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(0, 6, shape).astype(np.uint32) tensor = Tensor(Dtype.U32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_u32_new_axis_zero" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE) def new_axis_one(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(0, 6, shape).astype(np.uint32) tensor = Tensor(Dtype.U32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(1) concatenated_tensor = np.stack(values_array, axis) concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_u32_new_axis_one" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE) def new_axis_default(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(0, 6, shape).astype(np.uint32) tensor = Tensor(Dtype.U32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_u32_new_axis_default" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE) new_axis_zero() new_axis_one() new_axis_default() @staticmethod def concat_from_sequence_i32(): def new_axis_zero(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int32) tensor = Tensor(Dtype.I32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i32_new_axis_zero" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE) def new_axis_one(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int32) tensor = Tensor(Dtype.I32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(1) concatenated_tensor = np.stack(values_array, axis) concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i32_new_axis_one" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, 
Option::Some(1))", name, Trait.SEQUENCE) def new_axis_default(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int32) tensor = Tensor(Dtype.I32, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i32_new_axis_default" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE) new_axis_zero() new_axis_one() new_axis_default() @staticmethod def concat_from_sequence_i8(): def new_axis_zero(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int8) tensor = Tensor(Dtype.I8, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i8_new_axis_zero" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE) def new_axis_one(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int8) tensor = Tensor(Dtype.I8, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(1) concatenated_tensor = np.stack(values_array, axis) concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i8_new_axis_one" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE) def new_axis_default(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.int8) tensor = Tensor(Dtype.I8, values.shape, values.flatten()) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten()) name = "concat_from_sequence_i8_new_axis_default" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE) new_axis_zero() new_axis_one() new_axis_default() @staticmethod def concat_from_sequence_fp8x23(): def new_axis_zero(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23)) name = "concat_from_sequence_fp8x23_new_axis_zero" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, 
Option::Some(0))", name, Trait.SEQUENCE) def new_axis_one(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(1) concatenated_tensor = np.stack(values_array, axis) concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23)) name = "concat_from_sequence_fp8x23_new_axis_one" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE) def new_axis_default(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23)) name = "concat_from_sequence_fp8x23_new_axis_default" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE) new_axis_zero() new_axis_one() new_axis_default() @staticmethod def concat_from_sequence_fp16x16(): def new_axis_zero(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16)) name = "concat_from_sequence_fp16x16_new_axis_zero" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE) def new_axis_one(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(1) concatenated_tensor = np.stack(values_array, axis) concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16)) name = "concat_from_sequence_fp16x16_new_axis_one" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE) def new_axis_default(): sequence = [] values_array = [] shape = np.random.randint(1, 4, 2) for _ in range(5): values = np.random.randint(-6, 6, shape).astype(np.float64) tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16)) sequence.append(tensor) values_array.append(values) axis = np.int32(1) new_axis = np.uint32(0) concatenated_tensor = np.concatenate(values_array, axis) concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), 
FixedImpl.FP16x16)) name = "concat_from_sequence_fp16x16_new_axis_default" make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE) new_axis_zero() new_axis_one() new_axis_default()
https://github.com/gizatechxyz/orion
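A note on the semantics these generators encode (ONNX ConcatFromSequence): with new_axis = 0 (or omitted) the sequence is joined along an existing axis via np.concatenate, while new_axis = 1 inserts a fresh axis via np.stack. A minimal standalone sketch in plain NumPy, independent of the Orion helpers:

import numpy as np

seq = [np.arange(6).reshape(2, 3) for _ in range(3)]  # three (2, 3) tensors

# new_axis = 0: join along axis 1 -> shape (2, 9)
joined = np.concatenate(seq, axis=1)
assert joined.shape == (2, 9)

# new_axis = 1: insert a new axis at position 1 -> shape (2, 3, 3)
stacked = np.stack(seq, axis=1)
assert stacked.shape == (2, 3, 3)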
nodegen/node/conv.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait import numpy as np def r_index_check(r_index, shape_out): for i in range(len(r_index)): if r_index[i] >= shape_out[i]: return False return True def stride(arr): stride = np.zeros(len(arr)) acc = 1 for i in range(len(arr)): stride[i] = acc acc *= arr[-(i + 1)] return np.flip(stride) def conv( X, W, B=None, auto_pad=None, dilations=None, group=None, kernel_shape=None, pads=None, strides=None, ): if dilations is None: dilations = [1 for s in X.shape[2:]] if kernel_shape is None: kernel_shape = W.shape[2:] if pads is None: pads = [0 for s in X.shape[2:]] * 2 if strides is None: strides = [1 for s in X.shape[2:]] if X.shape[1] != W.shape[1] * group or W.shape[0] % group != 0: raise ValueError( f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}, " f"W should be {(W.shape[0], X.shape[1] // group, np.prod(W.shape[1:]) // X.shape[1] * group)}." ) if group > 1: res = [] td = 0 mg = W.shape[0] // group dw = W.shape[1] for b in range(X.shape[0]): for g in range(group): gx = X[b : b + 1, g * dw : (g + 1) * dw] gw = W[g * mg : (g + 1) * mg] try: cv = conv( gx, gw, None, auto_pad, dilations, 1, kernel_shape, pads, strides, ) except (ValueError, RuntimeError) as e: raise ValueError( f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={g}/{group}, " f"gx.shape={gx.shape}, gw.shape={gw.shape}, auto_pad={auto_pad}, " f"dilations={dilations}, kernel_shape={kernel_shape}, pads={pads}, " f"strides={strides}." ) from e if b == 0: td += cv.shape[1] res.append((b, cv)) new_shape = [X.shape[0], *list(res[0][1].shape[1:])] new_shape[1] = td final = np.zeros(tuple(new_shape), dtype=res[0][1].dtype) p = 0 for b, cv in res: final[b : b + 1, p : p + cv.shape[1]] = cv p += cv.shape[1] if p >= final.shape[1]: p = 0 if B is not None: new_shape = [1 for s in final.shape] new_shape[1] = B.shape[0] b = B.reshape(tuple(new_shape)) final += b return final if dilations[0] != 1 or min(dilations) != max(dilations): # Let's compute the dilated kernel. 
nd = len(dilations) new_kernel_shape = [] new_shape = list(W.shape[:-nd]) for i, d in enumerate(dilations): di = len(W.shape) - nd + i new_shape.append(W.shape[di] + (W.shape[di] - 1) * (d - 1)) new_kernel_shape.append(kernel_shape[i] + (kernel_shape[i] - 1) * (d - 1)) new_w = np.zeros(tuple(new_shape), dtype=W.dtype) indices = [slice(0, new_w.shape[0]), slice(0, new_w.shape[1])] for i, d in enumerate(dilations): di = len(W.shape) - nd + i indices.append(slice(0, new_w.shape[di], d)) new_w[tuple(indices)] = W W = new_w kernel_shape = new_kernel_shape if auto_pad in {"SAME_LOWER", "SAME_UPPER", "VALID"}: head = [] tail = [] for i in range(len(X.shape) - 2): d = X.shape[i] target_size = (d + strides[i] - 1) // strides[i] pad_needed = (target_size - 1) * strides[i] + kernel_shape[i] - d if auto_pad == "SAME_LOWER": pad_head = (pad_needed + 1) // 2 else: pad_head = pad_needed // 2 pad_tail = pad_needed - pad_head head.append(pad_head) tail.append(pad_tail) pads = head + tail if len(X.shape) == 3: sN, sC, sH = X.shape # M, C_group, kH, kW = W.shape (kh,) = kernel_shape (sth,) = strides h_out = int(((sH - kh + pads[0] + pads[1]) / sth) + 1) h0 = pads[0] oh = -1 * (kh % 2) bh = -h0 eh = h_out * sth res = np.zeros((X.shape[0], W.shape[0], h_out)) # type: ignore[assignment] if B is not None: res[:, :, :] += B.reshape((1, -1, 1)) # type: ignore for n in range(0, sN): for nw in range(W.shape[0]): for c in range(0, sC): w = W[nw : nw + 1, c : c + 1] for io in range(bh, eh, sth): hr = (io - bh) // sth if hr >= h_out: continue i = io + kh % 2 ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) img = X[n : n + 1, c : c + 1, ih1:ih2] if img.shape != w.shape: jh1, jh2 = max(-oh - i, 0), min(kh, kh + sH - (i + oh + kh)) w_ = w[:1, :1, jh1:jh2] if img.shape != w_.shape: raise RuntimeError( f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, " f"i={i}, kh={kh}, sH={sH}, sth={sth}." ) s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[ 0, 0 ] # (img * w_).sum() else: s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[ 0, 0 ] # (img * w).sum() res[n, nw, hr] += s # type: ignore return res if len(X.shape) == 4: sN, sC, sH, sW = X.shape # M, C_group, kH, kW = W.shape kh, kw = kernel_shape sth, stw = strides h_out = int(((sH - kh + pads[0] + pads[2]) / sth) + 1) w_out = int(((sW - kw + pads[1] + pads[3]) / stw) + 1) h0, w0 = pads[0], pads[1] oh, ow = -1 * (kh % 2), -1 * (kw % 2) bh, bw = -h0, -w0 eh, ew = h_out * sth, w_out * stw res = np.zeros((X.shape[0], W.shape[0], h_out, w_out)) # type: ignore[assignment] if B is not None: res[:, :, :, :] = B.reshape((1, -1, 1, 1)) # type: ignore for n in range(0, sN): for nw in range(W.shape[0]): for c in range(0, sC): w = W[nw : nw + 1, c : c + 1] for io in range(bh, eh, sth): hr = (io - bh) // sth if hr >= h_out: continue i = io + kh % 2 ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) for jo in range(bw, ew, stw): wr = (jo - bw) // stw if wr >= w_out: continue j = jo + kw % 2 iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW) img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2] if img.shape != w.shape: jh1, jh2 = max(-oh - i, 0), min( kh, kh + sH - (i + oh + kh) ) jw1, jw2 = max(-ow - j, 0), min( kw, kw + sW - (j + ow + kw) ) w_ = w[:1, :1, jh1:jh2, jw1:jw2] if img.shape != w_.shape: raise RuntimeError( f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, " f"i={i}, j={j}, kh={kh}, kw={kw}, sH={sH}, sW={sW}, sth={sth}, stw={stw}." 
) s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[ 0, 0 ] # (img * w_).sum() else: s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[ 0, 0 ] # (img * w).sum() res[n, nw, hr, wr] += s # type: ignore return res if len(X.shape) == 5: sN, sC, sH, sW, sZ = X.shape kh, kw, kz = kernel_shape sth, stw, stz = strides h_out = int(((sH - kh + pads[0] + pads[3]) / sth) + 1) w_out = int(((sW - kw + pads[1] + pads[4]) / stw) + 1) z_out = int(((sZ - kz + pads[2] + pads[5]) / stz) + 1) h0, w0, z0 = pads[0], pads[1], pads[2] oh, ow, oz = -1 * (kh % 2), -1 * (kw % 2), -1 * (kz % 2) bh, bw, bz = -h0, -w0, -z0 eh, ew, ez = h_out * sth, w_out * stw, z_out * stz res = np.zeros((X.shape[0], W.shape[0], h_out, w_out, z_out)) # type: ignore[assignment] if B is not None: res[:, :, :, :, :] = B.reshape((1, -1, 1, 1, 1)) # type: ignore for n in range(0, sN): for nw in range(W.shape[0]): for c in range(0, sC): w = W[nw : nw + 1, c : c + 1] for io in range(bh, eh, sth): hr = (io - bh) // sth if hr >= h_out: continue i = io + kh % 2 ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) for jo in range(bw, ew, stw): wr = (jo - bw) // stw if wr >= w_out: continue j = jo + kw % 2 iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW) for zo in range(bz, ez, stz): zr = (zo - bz) // stz if zr >= z_out: continue z = zo + kz % 2 iz1, iz2 = max(0, z + oz), min(z + oz + kz, sZ) img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2, iz1:iz2] ### ICI if img.shape != w.shape: jh1, jh2 = max(-oh - i, 0), min( kh, kh + sH - (i + oh + kh) ) jw1, jw2 = max(-ow - j, 0), min( kw, kw + sW - (j + ow + kw) ) jz1, jz2 = max(-oz - z, 0), min( kz, kz + sZ - (z + oz + kz) ) w_ = w[:1, :1, jh1:jh2, jw1:jw2, jz1:jz2] if img.shape != w_.shape: raise RuntimeError( f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, oz={oz}, " f"i={i}, j={j}, z={z}, kh={kh}, kw={kw}, kz={kz}, " f"sH={sH}, sW={sW}, sZ={sZ}, sth={sth}, stw={stw}, stz={stz}." 
) s = np.dot( img.reshape((1, -1)), w_.reshape((-1, 1)) )[ 0, 0 ] else: s = np.dot( img.reshape((1, -1)), w.reshape((-1, 1)) )[ 0, 0 ] res[n, nw, hr, wr, zr] += s # type: ignore return res else: nd = len(X.shape[2:]) sN, sC = X.shape[:2] x_stride = stride(X.shape) w_stride = stride(W.shape) x_flatten = X.reshape(int(x_stride[0] * X.shape[0])) shape_out = [int(((X.shape[2+i] - kernel_shape[i] + pads[i] + pads[i + nd]) / strides[i]) + 1) for i in range(nd)] o_index = [-1 * (kernel_shape[i] % 2) for i in range(nd)] b_index = [-pads[i] for i in range(nd)] e_index = [shape_out[i] * strides[i] for i in range(nd)] range_len = [(e_index[i] - b_index[i]) / strides[i] for i in range(nd)] range_stride = stride(range_len) res_shape = [X.shape[0], W.shape[0]] + shape_out res = np.zeros(res_shape) res_strides = stride(res_shape) if B is not None: res += B.reshape((1, -1) + (1,) * nd) # type: ignore for n in range(0, sN): for nw in range(W.shape[0]): for c in range(0, sC): w = W[nw : nw + 1, c : c + 1] for i in range(int(range_len[0] * range_stride[0])): flatten_index = i io_index = np.zeros(nd) r_index = np.zeros(nd) for nx in range(nd): n_index, rem = divmod(flatten_index, range_stride[nx]) flatten_index = rem io_index[nx] = n_index * strides[nx] + b_index[nx] r_index[nx] = n_index if r_index_check(r_index, shape_out): indices = [io_index[nx] + (kernel_shape[nx] % 2) for nx in range(nd)] i1_index = [max(0, indices[nx] + o_index[nx]) for nx in range(nd)] i2_index = [min(X.shape[2 + nx], indices[nx] + o_index[nx] + kernel_shape[nx]) for nx in range(nd)] idiff_index = [int(i2_index[nx] - i1_index[nx]) for nx in range(nd - 1)] i_stride = stride(idiff_index) img = [] for ii in range(int(i_stride[0] * idiff_index[0])): flatten_index = ii start = n * x_stride[0] + c * x_stride[1] for nx in range(nd - 1): ii_index, rem = divmod(flatten_index, i_stride[nx]) flatten_index = rem start += (i1_index[nx] + ii_index) * x_stride[2 + nx] start += i1_index[nd-1] end = start + (i2_index[nd-1] - i1_index[nd-1]) img.append(x_flatten[int(start):int(end)]) img_shape = [1, 1] + idiff_index w = w.reshape(np.prod(kernel_shape)) if len(img) != len(w): j1_index = [max(0, -indices[nx] - o_index[nx]) for nx in range(nd)] j2_index = [min(X.shape[2 + nx] - indices[nx] - o_index[nx], kernel_shape[nx]) for nx in range(nd)] jdiff_index = [j2_index[nx] - j1_index[nx] for nx in range(nd - 1)] w_ = [] j_stride = stride(jdiff_index) for jj in range(int(j_stride[0] * jdiff_index[0])): flatten_index = jj start = 0 for nx in range(nd): jj_index, rem = divmod(flatten_index, range_stride[nx]) flatten_index = rem start += (j1_index[nx] + jj_index) * kernel_shape[nx] w_.append(w[int(start + j1_index[-1]):int(start + j1_index[-1] + j2_index[nd-1] - j1_index[nd-1])]) img = np.array(img) s = np.dot( np.array(img).reshape((1, -1)), np.array(w_).reshape((-1, 1)) )[ 0, 0 ] else: img = np.array(img) s = np.dot( np.array(img).reshape((1, -1)), w.reshape((-1, 1)) )[ 0, 0 ] res_index = [] for nx in range(nd): res_index.append(int(r_index[nx])) index = tuple([n, nw]) + tuple(res_index) res[index] += s # type: ignore return res class Conv(RunAll): @staticmethod def export_conv_1D_no_padding() -> None: x = np.array( [ [ [ 0.0, 1.0, 2.0, 3.0, 4.0 ] ] ] ).astype(np.float32) w = np.array( [ [ [ 1.0, 1.0, 1.0 ] ] ] ).astype(np.float32) y = conv(x, w, group = 1) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape,
to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_1D_no_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_1D_with_padding() -> None: x = np.array( [ [ [ 0.0, 1.0, 2.0, 3.0, 4.0 ] ] ] ).astype(np.float32) w = np.array( [ [ [ 1.0, 1.0, 1.0 ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, pads=[1, 1]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_1D_with_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![1, 1].span())," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_2D_no_padding() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, 24.0], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[0, 0, 0, 0],) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_no_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_2D_with_padding() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, 24.0], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_with_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![1, 1, 1, 1].span())," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_3D_no_padding() -> None: x = np.array( [ [ [ [ [ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24] ], [ [ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49] ], [ [ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[
60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74] ], [ [ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99] ], [ [100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124] ] ] ] ] ).astype(np.float32) w = np.array( [ [ [ [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ], [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ], [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ] ] ] ] ).astype(np.float32) y = conv(x, w, group = 1) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_3D_no_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_3D_with_padding() -> None: x = np.array( [ [ [ [ [ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24] ], [ [ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49] ], [ [ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74] ], [ [ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99] ], [ [100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124] ] ] ] ] ).astype(np.float32) w = np.array( [ [ [ [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ], [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ], [ [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] ] ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_3D_with_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1].span())," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_4D_no_padding() -> None: x = np.array( [ [ [ [ [ [ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8] ], [ [ 9, 10, 11],[12, 13, 14],[15, 16, 17] ], [ [18, 19, 20],[21, 22, 23],[24, 25, 26] ] ], [ [ [27, 28, 29],[30, 31, 32],[33, 34, 35] ], [ [36, 37, 38],[39, 40, 41],[42, 43, 44] ], [ [45, 46, 47],[48, 49, 50],[51, 52, 53] ] ], [ [ [54, 55, 56],[57, 58, 59],[60, 61, 62] ], [ [63, 64, 65],[66, 67, 68],[69, 70, 71] ], [ [72, 73, 74],[75, 76, 77],[78, 79, 80] ] ] ] ] ] ).astype(np.float32) w = np.array( [ [ [ [ [ [1., 1.],[1., 1.] ], [ [1., 1.],[1., 1.] ] ], [ [ [1., 1.],[1., 1.] ], [ [1., 1.],[1., 1.] 
] ] ] ] ] ).astype(np.float32) y = conv(x, w, group = 1) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_4D_no_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_4D_with_padding() -> None: x = np.array( [ [ [ [ [ [ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8] ], [ [ 9, 10, 11],[12, 13, 14],[15, 16, 17] ], [ [18, 19, 20],[21, 22, 23],[24, 25, 26] ] ], [ [ [27, 28, 29],[30, 31, 32],[33, 34, 35] ], [ [36, 37, 38],[39, 40, 41],[42, 43, 44] ], [ [45, 46, 47],[48, 49, 50],[51, 52, 53] ] ], [ [ [54, 55, 56],[57, 58, 59],[60, 61, 62] ], [ [63, 64, 65],[66, 67, 68],[69, 70, 71] ], [ [72, 73, 74],[75, 76, 77],[78, 79, 80] ] ] ] ] ] ).astype(np.float32) w = np.array( [ [ [ [ [ [1., 1.],[1., 1.] ], [ [1., 1.],[1., 1.] ] ], [ [ [1., 1.],[1., 1.] ], [ [1., 1.],[1., 1.] ] ] ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1, 1, 1]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_4D_with_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1, 1, 1].span())," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_with_autopad_same() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, 24.0], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, kernel_shape=[3, 3],auto_pad="SAME_LOWER",strides = [2, 2]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_with_autopad_same" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::Some(AUTO_PAD::SAME_LOWER)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![3, 3].span())," func_sig += "Option::None," func_sig += "Option::Some(array![2, 2].span()))" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_with_strides_asymmetric_padding() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, 24.0], [25.0, 26.0, 27.0, 28.0, 29.0], [30.0, 31.0, 32.0, 33.0, 34.0], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 0, 1, 0],strides = [2, 2]) x =
Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_with_strides_asymmetric_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![3, 3].span())," func_sig += "Option::Some(array![1, 0, 1, 0].span())," func_sig += "Option::Some(array![2, 2].span()))" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_with_strides_with_padding() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, 24.0], [25.0, 26.0, 27.0, 28.0, 29.0], [30.0, 31.0, 32.0, 33.0, 34.0], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ] ] ] ).astype(np.float32) y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],strides = [2, 2]) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_with_strides_with_padding" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![3, 3].span())," func_sig += "Option::Some(array![1, 1, 1, 1].span())," func_sig += "Option::Some(array![2, 2].span()))" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_conv_with_2_groups() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [ [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]] ] ] ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ], [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv(x, w, group = 2) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_2D_with_2_groups" func_sig = "NNTrait::conv(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(2)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN)
https://github.com/gizatechxyz/orion
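A quick sanity check of the reference conv above, assuming the function is in scope (e.g. imported from nodegen/node/conv.py): for a 5x5 ramp input and a 3x3 all-ones kernel with no padding and stride 1, every output element is the sum of a 3x3 window, i.e. nine times the window's centre value.

import numpy as np
# `conv` is the pure-NumPy reference defined in nodegen/node/conv.py above.

x = np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5)
w = np.ones((1, 1, 3, 3), dtype=np.float32)

y = conv(x, w, group=1)             # valid cross-correlation, stride 1
expected = 9.0 * x[0, 0, 1:4, 1:4]  # 3x3 window sum = 9 * centre value
assert y.shape == (1, 1, 3, 3)
assert np.allclose(y[0, 0], expected)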
nodegen/node/conv_transpose.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait def conv_transpose( X, W, B=None, auto_pad=None, dilations=None, group=None, kernel_shape=None, output_padding=None, output_shape=None, pads=None, strides=None, ): if dilations is None: dilations = [1 for s in X.shape[2:]] if kernel_shape is None: kernel_shape = W.shape[2:] if output_padding is None: output_padding = [0 for s in X.shape[2:]] * 2 if strides is None: strides = [1 for s in X.shape[2:]] if pads is None and auto_pad not in {"SAME_UPPER", "SAME_LOWER"}: pads = [0 for i in range(2 * len(strides))] if pads is None: if output_shape is None: output_shape = [ X.shape[i + 2] * strides[i] for i in range(len(strides)) ] total_padding = [ strides[i] * (X.shape[i + 2] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i] for i in range(len(output_shape)) ] pads_1 = [] pads_2 = [] for i in range(len(output_shape)): if auto_pad == "SAME_UPPER": pads_1.append(total_padding[i] // 2) pads_2.append(total_padding[i] - (total_padding[i] // 2)) else: pads_1.append(total_padding[i] - (total_padding[i] // 2)) pads_2.append(total_padding[i] // 2) pads = pads_1 + pads_2 n_dims = len(pads) // 2 else: n_dims = len(X.shape) - 2 new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) if output_shape is None: output_shape = [ strides[i] * (X.shape[i + 2] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - new_pads[i, :].sum() for i in range(n_dims) ] kernel_shape = W.shape[2:] kernel_size = np.prod(kernel_shape) num_output_channels = W.shape[1] * group kernel_dim = num_output_channels // group * kernel_size C = X.shape[1] # num_inputs_channels m = kernel_dim # kernel_dim n = np.prod(X.shape[2:]) # input_image_size k = C // group w_reshaped = W.reshape((group, k, m)) final = None # N x C x H x W = X.shape # C x M/group x k1 x k2 = W.shape if group == 1: for image_id in range(X.shape[0]): w_t = w_reshaped[0].T gemm = np.matmul(w_t, X[image_id].reshape((k, n))) gemmc = gemm.reshape((num_output_channels, -1, gemm.shape[-1])) for c in range(num_output_channels): res = col2im_naive_implementation( gemmc[c], output_shape, kernel_shape, dilations, pads, strides ) if final is None: final = np.empty( X.shape[:1] + (num_output_channels,) + res.shape, dtype=X.dtype, ) if B is not None: res += B[c] final[image_id, c, ...] = res[...] else: final = np.zeros((X.shape[0], num_output_channels ) + tuple(output_shape)) output_array = [] for group_id in range(group): group_X = X[:, group_id * C // group : (group_id + 1) * C // group, ...] group_W = W[group_id * num_output_channels // group : (group_id + 1) * num_output_channels // group, ...] group_output = conv_transpose( group_X, group_W, B=B, auto_pad=auto_pad, dilations=dilations, group=1, kernel_shape=kernel_shape, output_padding=output_padding, output_shape=output_shape, pads=pads, strides=strides, ) group_output = np.array(group_output[0]) output_array.append(group_output) for image_id in range(X.shape[0]): for group_id in range(group): group_output = output_array[group_id] final[image_id, group_id:(group_id+1), ...] = group_output[image_id, ...] 
return (final.astype(X.dtype),) def _get_indices(i, shape): res = np.empty((len(shape),), dtype=np.int64) k = len(shape) - 1 while k > 0: m = i % shape[k] res[k] = m i -= m i /= shape[k] k -= 1 res[0] = i return res def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore n_input_plane = X.shape[0] kernel_size = np.prod(kernel_shape) if n_input_plane % kernel_size != 0: raise ValueError( f"Expected size of input's dimension 1 to be divisible by the " f"product of kernel_size={kernel_size}, " f"but got input.size(1)={n_input_plane} " f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}." ) input_length = X.shape[1] n_dims = len(output_shape) n_blocks = [] for i in range(n_dims): n_block = ( output_shape[i] + pads[i, :].sum() - dilations[i] * (kernel_shape[i] - 1) - 1 ) // strides[i] + 1 n_blocks.append(n_block) block_size = np.prod(n_blocks) if input_length != block_size: raise ValueError( f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, " f"output_shape={output_shape}, kernel_shape={kernel_shape}, " f"dilations={dilations}, pads={pads}, strides={strides}, " f"expected size of input's dimension 2 to match the calculated number of " f"sliding blocks {n_blocks} = {block_size}, " f"but got input.size(2)={input_length}.", ) def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides): # type: ignore n_dims = len(pads) // 2 new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) _col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides) data_col = data data_im = np.zeros(image_shape, dtype=data.dtype) dim_col = [] for i in range(n_dims): col = ( image_shape[i] + new_pads[i, :].sum() - (dilations[i] * (kernel_shape[i] - 1) + 1) ) // strides[i] + 1 dim_col.append(col) kernel_size = np.prod(kernel_shape) col_size = np.prod(dim_col) for c_col in range(kernel_size): offset = _get_indices(c_col, kernel_shape) for col in range(col_size): ind_col = _get_indices(col, dim_col) ind_im = [] for i in range(n_dims): ind = ( ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i] ) ind_im.append(ind) if not _is_out(ind_im, data_im.shape): data_im[tuple(ind_im)] += data_col[c_col, col] return data_im def _is_out(ind, shape): for i, s in zip(ind, shape): if i < 0: return True if i >= s: return True return False class Conv_transpose(RunAll): @staticmethod def export_conv_transpose() -> None: x = np.array( [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=1)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_1d() -> None: x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3) w = np.array([[[1.0, 1.0, 
1.0], [1.0, 1.0, 1.0]]]).astype( # (1, 2, 3) np.float32 ) y = conv_transpose(x, w, group=1)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_1d" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_3d() -> None: x = np.array( [ [ [ [ [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5) [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], ], [ [20.0, 21.0, 22.0, 23.0, 24.0], [25.0, 26.0, 27.0, 28.0, 29.0], [30.0, 31.0, 32.0, 33.0, 34.0], [35.0, 36.0, 37.0, 38.0, 39.0], ], [ [40.0, 41.0, 42.0, 43.0, 44.0], [45.0, 46.0, 47.0, 48.0, 49.0], [50.0, 51.0, 52.0, 53.0, 54.0], [55.0, 56.0, 57.0, 58.0, 59.0], ], ] ] ] ).astype(np.float32) w = np.array( [ [ [ [ [1.0, 1.0, 1.0], # (1, 2, 3, 3, 3) [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], ], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ], [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=1)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_3d" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_attributes() -> None: x = np.array( [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=1)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_attributes" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_pads() -> None: x = np.array( [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 
3, 3) [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=1,strides=[3, 2],output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1],)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_pads" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![3, 3].span())," func_sig += "Option::Some(array![1, 1].span())," func_sig += "Option::Some(array![10, 8].span())," func_sig += "Option::None," func_sig += "Option::Some(array![3, 2].span()))" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_dilations() -> None: x = np.array( [[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]] # (1, 1, 3, 3) ).astype(np.float32) w = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32) # (1, 1, 2, 2) y = conv_transpose(x, w, group=1, dilations=[2, 2])[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_dilations" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![2, 2].span())," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None,)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_autopad_same() -> None: x = np.array( [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=1, auto_pad="SAME_UPPER", strides=[2, 2])[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_autopad_same" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::Some(AUTO_PAD::SAME_UPPER)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(array![2, 2].span()))" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_group_2() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [ [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]] ] ] ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ], [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=2)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = 
Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_group_2" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(2)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None,)" make_test( [x, w], y, func_sig, name, Trait.NN) @staticmethod def export_convtranspose_group_2_image_3() -> None: x = np.array( [ [ [ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [ [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] ] ], [ [ [18.0, 19.0, 20.0], [21.0, 22.0, 23.0], [24.0, 25.0, 26.0] ], [ [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] ] ], [ [ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0] ], [ [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] ] ] ] ).astype(np.float32) w = np.array( [ [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ], [ [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ] ] ).astype(np.float32) y = conv_transpose(x, w, group=2)[0] x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "conv_transpose_group_2_image_3" func_sig = "NNTrait::conv_transpose(" func_sig += "@input_0," func_sig += "@input_1," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(2)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None,)" make_test( [x, w], y, func_sig, name, Trait.NN)
https://github.com/gizatechxyz/orion
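The default output size computed by conv_transpose above follows the ONNX ConvTranspose formula: out = stride * (in - 1) + output_padding + ((kernel - 1) * dilation + 1) - pad_begin - pad_end. A small worked check, assuming the helper above is in scope:

import numpy as np
# `conv_transpose` is the reference defined in nodegen/node/conv_transpose.py above.

x = np.zeros((1, 1, 3, 3), dtype=np.float32)
w = np.ones((1, 2, 3, 3), dtype=np.float32)

# stride 1, dilation 1, no pads: out = 1 * (3 - 1) + 0 + ((3 - 1) * 1 + 1) - 0 = 5
y = conv_transpose(x, w, group=1)[0]
assert y.shape == (1, 2, 5, 5)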
nodegen/node/cos.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Cos(RunAll):
    @staticmethod
    def cos_fp8x23():
        x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
        y = np.cos(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "cos_fp8x23"
        make_test([x], y, "input_0.cos()", name)

    @staticmethod
    def cos_fp16x16():
        x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
        y = np.cos(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "cos_fp16x16"
        make_test([x], y, "input_0.cos()", name)
https://github.com/gizatechxyz/orion
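These trig generators just wrap the NumPy function and convert the samples to fixed point. Assuming to_fp encodes FP16x16 in the usual Q16.16 way, i.e. scale by 2**16 and truncate to an integer (the helper's exact rounding may differ), the conversion amounts to the hypothetical stand-in below:

import numpy as np

def to_fp16x16(values: np.ndarray) -> np.ndarray:
    # Hypothetical stand-in for helpers.to_fp(..., FixedImpl.FP16x16):
    # scale by 2**16 and truncate (Q16.16 encoding).
    return (values * 2**16).astype(np.int64)

x = np.array([0.0, np.pi / 3])
print(to_fp16x16(np.cos(x)))  # cos(0) -> 65536, cos(pi/3) -> ~32768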
nodegen/node/cosh.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Cosh(RunAll):
    @staticmethod
    def cosh_fp8x23():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.cosh(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "cosh_fp8x23"
        make_test([x], y, "input_0.cosh()", name)

    @staticmethod
    def cosh_fp16x16():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.cosh(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "cosh_fp16x16"
        make_test([x], y, "input_0.cosh()", name)
https://github.com/gizatechxyz/orion
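cosh grows exponentially, which is presumably why the sample range here narrows to (-3, 3) while cos above used (-10, 127): the outputs must stay inside the fixed-point range. Assuming FP8x23 carries roughly 8 integer bits of magnitude and FP16x16 roughly 15 (sign aside), the headroom works out as:

import numpy as np

# Largest |x| whose cosh still fits each assumed fixed-point magnitude:
print(np.arccosh(2.0 ** 8))     # ~6.24 for FP8x23
print(np.arccosh(2.0 ** 15))    # ~11.09 for FP16x16
assert np.cosh(3.0) < 2.0 ** 8  # cosh(3) ~ 10.07, inside both ranges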
nodegen/node/cumsum.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Cumsum(RunAll): @staticmethod def cumsum_u32(): def cumsum_1D(): def default(): x = np.array([1, 2, 3, 4, 5]).astype(np.uint32) y = np.array([1, 3, 6, 10, 15]).astype(np.uint32) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_1d_default" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.uint32) y = np.array([0, 1, 3, 6, 10]).astype(np.uint32) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_1d_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name) def reverse(): x = np.array([1, 2, 3, 4, 5]).astype(np.uint32) y = np.array([15, 14, 12, 9, 5]).astype(np.uint32) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_1d_reverse" make_test( [x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name) def reverse_exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.uint32) y = np.array([14, 12, 9, 5, 0]).astype(np.uint32) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_1d_reverse_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name) default() exclusive() reverse() reverse_exclusive() cumsum_1D() def cumsum_2D(): def axis_0(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.uint32).reshape((2, 3)) y = np.array([1, 2, 3, 5, 7, 9]).astype( np.uint32).reshape((2, 3)) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_2d_axis_0" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def axis_1(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.uint32).reshape((2, 3)) y = np.array([1, 3, 6, 4, 9, 15]).astype( np.uint32).reshape((2, 3)) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "cumsum_u32_2d_axis_1" make_test( [x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name) axis_0() axis_1() cumsum_2D() @staticmethod def cumsum_i32(): def cumsum_1D(): def default(): x = np.array([1, 2, 3, 4, 5]).astype(np.int32) y = np.array([1, 3, 6, 10, 15]).astype(np.int32) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_1d_default" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int32) y = np.array([0, 1, 3, 6, 10]).astype(np.int32) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_1d_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name) def reverse(): x = np.array([1, 2, 3, 4, 5]).astype(np.int32) y = np.array([15, 14, 12, 9, 5]).astype(np.int32) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_1d_reverse" make_test( [x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name) def reverse_exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int32) y = np.array([14, 12, 9, 5, 0]).astype(np.int32) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_1d_reverse_exclusive" make_test( 
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name) default() exclusive() reverse() reverse_exclusive() cumsum_1D() def cumsum_2D(): def axis_0(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int32).reshape((2, 3)) y = np.array([1, 2, 3, 5, 7, 9]).astype( np.int32).reshape((2, 3)) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_2d_axis_0" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def axis_1(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int32).reshape((2, 3)) y = np.array([1, 3, 6, 4, 9, 15]).astype( np.int32).reshape((2, 3)) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "cumsum_i32_2d_axis_1" make_test( [x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name) axis_0() axis_1() cumsum_2D() @staticmethod def cumsum_i8(): def cumsum_1D(): def default(): x = np.array([1, 2, 3, 4, 5]).astype(np.int8) y = np.array([1, 3, 6, 10, 15]).astype(np.int8) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_1d_default" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int8) y = np.array([0, 1, 3, 6, 10]).astype(np.int8) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_1d_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name) def reverse(): x = np.array([1, 2, 3, 4, 5]).astype(np.int8) y = np.array([15, 14, 12, 9, 5]).astype(np.int8) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_1d_reverse" make_test( [x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name) def reverse_exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int8) y = np.array([14, 12, 9, 5, 0]).astype(np.int8) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_1d_reverse_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name) default() exclusive() reverse() reverse_exclusive() cumsum_1D() def cumsum_2D(): def axis_0(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int8).reshape((2, 3)) y = np.array([1, 2, 3, 5, 7, 9]).astype( np.int8).reshape((2, 3)) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_2d_axis_0" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def axis_1(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int8).reshape((2, 3)) y = np.array([1, 3, 6, 4, 9, 15]).astype( np.int8).reshape((2, 3)) x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "cumsum_i8_2d_axis_1" make_test( [x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name) axis_0() axis_1() cumsum_2D() @staticmethod def cumsum_fp8x23(): def cumsum_1D(): def default(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([1, 3, 6, 10, 15]).astype(np.int64) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_1d_default" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([0, 1, 3, 6, 10]).astype(np.int64) x = 
Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_1d_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name) def reverse(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([15, 14, 12, 9, 5]).astype(np.int64) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_1d_reverse" make_test( [x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name) def reverse_exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([14, 12, 9, 5, 0]).astype(np.int64) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_1d_reverse_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name) default() exclusive() reverse() reverse_exclusive() cumsum_1D() def cumsum_2D(): def axis_0(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int64).reshape((2, 3)) y = np.array([1, 2, 3, 5, 7, 9]).astype( np.int64).reshape((2, 3)) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_2d_axis_0" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def axis_1(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int64).reshape((2, 3)) y = np.array([1, 3, 6, 4, 9, 15]).astype( np.int64).reshape((2, 3)) x = Tensor(Dtype.FP8x23, x.shape, to_fp( x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "cumsum_fp8x23_2d_axis_1" make_test( [x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name) axis_0() axis_1() cumsum_2D() @staticmethod def cumsum_fp16x16(): def cumsum_1D(): def default(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([1, 3, 6, 10, 15]).astype(np.int64) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_1d_default" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([0, 1, 3, 6, 10]).astype(np.int64) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_1d_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name) def reverse(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([15, 14, 12, 9, 5]).astype(np.int64) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_1d_reverse" make_test( [x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name) def reverse_exclusive(): x = np.array([1, 2, 3, 4, 5]).astype(np.int64) y = np.array([14, 12, 9, 5, 0]).astype(np.int64) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_1d_reverse_exclusive" make_test( [x], y, "input_0.cumsum(0, Option::Some(true), 
Option::Some(true))", name) default() exclusive() reverse() reverse_exclusive() cumsum_1D() def cumsum_2D(): def axis_0(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int64).reshape((2, 3)) y = np.array([1, 2, 3, 5, 7, 9]).astype( np.int64).reshape((2, 3)) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_2d_axis_0" make_test( [x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name) def axis_1(): x = np.array([1, 2, 3, 4, 5, 6]).astype( np.int64).reshape((2, 3)) y = np.array([1, 3, 6, 4, 9, 15]).astype( np.int64).reshape((2, 3)) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "cumsum_fp16x16_2d_axis_1" make_test( [x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name) axis_0() axis_1() cumsum_2D()
https://github.com/gizatechxyz/orion
nodegen/node/depth_to_space.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def depth_to_space(data: np.ndarray, blocksize: int = 2, mode="DCR") -> np.ndarray:
    if len(data.shape) != 4:
        raise RuntimeError(f"Unexpected shape {data.shape!r}.")
    b, c, h, w = data.shape
    if mode == "DCR":
        tmpshape = (b, blocksize, blocksize, c // (blocksize * blocksize), h, w)
        reshaped = data.reshape(tmpshape)
        transposed = np.transpose(reshaped, [0, 3, 4, 1, 5, 2])
    else:  # assert mode == "CRD"
        tmpshape = (b, c // (blocksize * blocksize), blocksize, blocksize, h, w)
        reshaped = data.reshape(tmpshape)
        transposed = np.transpose(reshaped, [0, 1, 4, 2, 5, 3])
    finalshape = (b, c // (blocksize * blocksize), h * blocksize, w * blocksize)
    y = np.reshape(transposed, finalshape)
    return y


class Depth_to_space(RunAll):
    @staticmethod
    def fp8x23():
        x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float64)
        y = depth_to_space(x)
        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
        name = "depth_to_space_fp8x23"
        make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
                  name, Trait.NN)

    @staticmethod
    def fp16x16():
        x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float16)
        y = depth_to_space(x)
        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "depth_to_space_fp16x16"
        make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
                  name, Trait.NN)

    # @staticmethod
    # def fp64x64():
    #     x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float64)
    #     y = depth_to_space(x)
    #     x = Tensor(Dtype.FP64x64, x.shape, to_fp(x.flatten(), FixedImpl.FP64x64))
    #     y = Tensor(Dtype.FP64x64, y.shape, to_fp(y.flatten(), FixedImpl.FP64x64))
    #     name = "depth_to_space_fp64x64"
    #     make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
    #               name, Trait.NN)

    @staticmethod
    def fpi8():
        x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int8)
        y = depth_to_space(x)
        x = Tensor(Dtype.I8, x.shape, x.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())
        name = "depth_to_space_i8"
        make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
                  name, Trait.NN)

    @staticmethod
    def fpi32():
        x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int32)
        y = depth_to_space(x, mode="CRD")
        x = Tensor(Dtype.I32, x.shape, x.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "depth_to_space_i32"
        make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')",
                  name, Trait.NN)

    @staticmethod
    def fpu32():
        x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.uint32)
        y = depth_to_space(x, mode="CRD")
        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())
        name = "depth_to_space_u32"
        make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')",
                  name, Trait.NN)
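For intuition, here is a small assumed example reusing the `depth_to_space` reference above: with a 1x4x2x2 input and blocksize=2, DCR mode interleaves the four 2x2 channel planes depth-first into a single 4x4 plane.

# Worked example (assumed, reuses depth_to_space defined above).
import numpy as np

x = np.arange(16).reshape(1, 4, 2, 2)          # channel c holds values c*4 .. c*4+3
y = depth_to_space(x, blocksize=2, mode="DCR")
assert y.shape == (1, 1, 4, 4)                 # 4 channels folded into a 2x bigger plane
assert (y[0, 0, 0] == [0, 4, 1, 5]).all()      # first row draws depth-first from channels 0 and 1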
https://github.com/gizatechxyz/orion
nodegen/node/div.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Div(RunAll):
    @staticmethod
    def div_u32():
        def default():
            x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(1, 3, (3, 3, 3)).astype(np.uint32)
            z = x / y
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "div_u32"
            make_test([x, y], z, "input_0 / input_1", name)

        def broadcast():
            x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(1, 3, (1, 3, 1)).astype(np.uint32)
            z = x / y
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "div_u32_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)

        default()
        broadcast()

    @staticmethod
    def div_i32():
        def default():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
            z = x / y
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "div_i32"
            make_test([x, y], z, "input_0 / input_1", name)

        def broadcast():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(1, 3, (1, 3, 1)).astype(np.int32)
            z = x / y
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "div_i32_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)

        default()
        broadcast()

    @staticmethod
    def div_i8():
        def default():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
            z = x / y
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "div_i8"
            make_test([x, y], z, "input_0 / input_1", name)

        def broadcast():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(1, 3, (1, 3, 1)).astype(np.int8)
            z = x / y
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "div_i8_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)

        default()
        broadcast()

    @staticmethod
    def div_fp8x23():
        def default():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            z = x / y
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "div_fp8x23"
            make_test([x, y], z, "input_0 / input_1", name)

        def broadcast():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(1, 3, (1, 3, 1)).astype(np.float64)
            z = x / y
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "div_fp8x23_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)

        default()
        broadcast()

    @staticmethod
    def div_fp16x16():
        def default():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            z = x / y
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "div_fp16x16"
            make_test([x, y], z, "input_0 / input_1", name)

        def broadcast():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(1, 3, (1, 3, 1)).astype(np.float64)
            z = x / y
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "div_fp16x16_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/equal.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Equal(RunAll):
    @staticmethod
    def equal_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_u32"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_u32_broadcast"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def equal_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = np.equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_i32"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
            z = np.equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_i32_broadcast"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def equal_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = np.equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_i8"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
            z = np.equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_i8_broadcast"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def equal_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_fp8x23"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_fp8x23_broadcast"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def equal_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_fp16x16"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "equal_fp16x16_broadcast"
            make_test([x, y], z, "input_0.equal(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/erf.py
import numpy as np
from math import erf
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Erf(RunAll):
    @staticmethod
    def erf_fp8x23():
        x = np.asarray([0.12, -1.66, 3.4, 4.8, 2.7]).astype(np.float64).reshape(1, 5)
        y = np.asarray([erf(value) for value in x[0]]).astype(np.float64).reshape(1, 5)
        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
        name = "erf_fp8x23"
        make_test([x], y, "input_0.erf()", name)

    @staticmethod
    def erf_fp16x16():
        x = np.asarray([0.12, -1.66, 3.4, 4.8, 2.7]).astype(np.float64).reshape(1, 5)
        y = np.asarray([erf(value) for value in x[0]]).astype(np.float64).reshape(1, 5)
        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "erf_fp16x16"
        make_test([x], y, "input_0.erf()", name)
https://github.com/gizatechxyz/orion
nodegen/node/exp.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Exp(RunAll):
    @staticmethod
    def exp_fp8x23():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.exp(x)
        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
        name = "exp_fp8x23"
        make_test([x], y, "input_0.exp()", name)

    @staticmethod
    def exp_fp16x16():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.exp(x)
        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "exp_fp16x16"
        make_test([x], y, "input_0.exp()", name)
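All of the fixed-point generators in these files funnel through the `to_fp` helper. As a rough sketch of its assumed behaviour (the real implementation lives in nodegen's helpers module), it scales floats by two raised to the format's fractional-bit count, 23 bits for FP8x23 and 16 bits for FP16x16, and rounds to integers:

# Assumed sketch of the to_fp conversion; to_fp_sketch and FRACTIONAL_BITS
# are hypothetical names, not the real helper.
import numpy as np

FRACTIONAL_BITS = {"FP8x23": 23, "FP16x16": 16}

def to_fp_sketch(values: np.ndarray, impl: str) -> np.ndarray:
    # Scale into the fixed-point grid and round to the nearest integer tick.
    return np.round(values * 2 ** FRACTIONAL_BITS[impl]).astype(np.int64)

print(to_fp_sketch(np.array([1.0, -0.5]), "FP16x16"))  # [ 65536 -32768]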
https://github.com/gizatechxyz/orion
nodegen/node/gather.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Gather(RunAll):
    @staticmethod
    def gather_fp16x16():
        def default():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.array([[0, 1], [2, 1], [0, 2]]).astype(np.uint32)
            y = x1.take(x2, axis=0)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_fp16x16_3d_default"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather(indices:input_1, axis:Option::Some(0))",
                name=name)

        def axis1():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.array([[0, 1], [2, 1], [0, 2]]).astype(np.int64)
            y = x1.take(x2, axis=1)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_fp16x16_3d_axis1"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather(indices:input_1, axis:Option::Some(1))",
                name=name)

        def axis2():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.array([[0, 1], [2, 1], [0, 2]]).astype(np.int64)
            y = x1.take(x2, axis=2)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_fp16x16_3d_axis2"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather(indices:input_1, axis:Option::Some(2))",
                name=name)

        def negative_indices():
            x1 = np.arange(10).astype(np.float32)
            x2 = np.array([0, -9, -10]).astype(np.int64)
            y = np.take(x1, x2, axis=0)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_negative_indices"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather(indices:input_1, axis:Option::Some(0))",
                name=name)

        def negative_axis():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.array([[0, 1], [2, 1], [0, 2]]).astype(np.uint32)
            y = x1.take(x2, axis=-1)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_negative_axis"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather(indices:input_1, axis:Option::Some(-1))",
                name=name)

        default()
        axis1()
        axis2()
        negative_indices()
        negative_axis()
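The `negative_indices` case relies on numpy's convention, which the Cairo operator is expected to mirror: an index i < 0 addresses element i + dim along the gathered axis. A standalone check (assumed example):

# Negative gather indices count from the end of the axis.
import numpy as np

data = np.arange(10).astype(np.float32)
idx = np.array([0, -9, -10])
assert (np.take(data, idx, axis=0) == data[idx]).all()   # np.take matches fancy indexing
assert (data[idx] == [0.0, 1.0, 0.0]).all()              # -9 -> 1, -10 -> 0 on a length-10 axis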
https://github.com/gizatechxyz/orion
nodegen/node/gather_elements.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def gather_elements(data, indices, axis=0):  # type: ignore
    data_swaped = np.swapaxes(data, 0, axis)
    index_swaped = np.swapaxes(indices, 0, axis)
    gathered = np.choose(index_swaped, data_swaped, mode="wrap")
    y = np.swapaxes(gathered, 0, axis)
    return y


class Gather_elements(RunAll):
    @staticmethod
    def gather_elements_fp16x16():
        def default():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.random.randint(low=0, high=2, size=(3, 3, 3)).astype(np.uint32)
            y = gather_elements(x1, x2, axis=0)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_elements_default"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather_elements(indices:input_1, axis:Option::Some(0))",
                name=name)

        def axis1():
            x1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
            x2 = np.array([[0, 0], [1, 0]], dtype=np.int32)
            y = gather_elements(x1, x2, axis=1)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_elements_axis1"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather_elements(indices:input_1, axis:Option::Some(1))",
                name=name)

        def axis2():
            x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
            x2 = np.random.randint(low=0, high=3, size=(3, 3, 3)).astype(np.uint32)
            y = gather_elements(x1, x2, axis=2)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_elements_axis2"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather_elements(indices:input_1, axis:Option::Some(2))",
                name=name)

        def negative_indices():
            x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
            x2 = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
            y = gather_elements(x1, x2, axis=0)
            x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
            x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "gather_elements_negative_indices"
            make_test(
                inputs=[x1, x2], output=y,
                func_sig="input_0.gather_elements(indices:input_1, axis:Option::Some(0))",
                name=name)

        default()
        axis1()
        axis2()
        negative_indices()
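The np.choose-based reference above is, for in-range indices, equivalent to numpy's `take_along_axis`; `mode="wrap"` additionally makes out-of-range (including negative) indices wrap around, matching the `negative_indices` fixture. A quick assumed cross-check, reusing `gather_elements` from above:

# Cross-check against numpy's take_along_axis for in-range indices.
import numpy as np

data = np.array([[1, 2], [3, 4]], dtype=np.float32)
idx = np.array([[0, 0], [1, 0]], dtype=np.int64)
assert (gather_elements(data, idx, axis=1)
        == np.take_along_axis(data, idx, axis=1)).all()  # both give [[1, 1], [4, 3]]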
https://github.com/gizatechxyz/orion
nodegen/node/gather_nd.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def gather_nd_impl(
    data: np.ndarray, indices: np.ndarray, batch_dims: int
) -> np.ndarray:
    # Note the data rank - will be reused multiple times later
    data_rank = len(data.shape)

    # Check input tensors' shape/rank condition
    assert indices.shape[-1] <= data_rank

    # The batch_dims prefix of the data/indices shapes
    batch_dims_shape = []

    # The number of elements covered by batch_dims
    batch_dims_size = 1

    # Check that the indices and data shapes agree over the batch dims.
    for i in range(batch_dims):
        batch_dims_shape.append(indices.shape[i])
        batch_dims_size *= indices.shape[i]

    # Compute the shape of the output array
    output_shape = (
        batch_dims_shape + list(indices.shape)[batch_dims:-1]
        if (indices.shape[-1] == data_rank - batch_dims)
        else batch_dims_shape
        + list(indices.shape)[batch_dims:-1]
        + list(data.shape)[batch_dims + indices.shape[-1]:]
    )

    # Placeholder for output data
    output_data_buffer = []

    # Flatten 'indices' to a 2D array
    reshaped_indices = indices.reshape(batch_dims_size, -1, indices.shape[-1])

    # Flatten 'data' to shape (batch_dims_size,) + data.shape[batch_dims:]
    reshaped_data = data.reshape((batch_dims_size,) + data.shape[batch_dims:])

    # Gather each scalar value from 'data'
    for batch_dim in range(reshaped_indices.shape[0]):
        for outer_dim in range(reshaped_indices.shape[1]):
            gather_index = tuple(reshaped_indices[batch_dim][outer_dim])
            output_data_buffer.append(reshaped_data[(batch_dim, *gather_index)])
    return np.asarray(output_data_buffer, dtype=data.dtype).reshape(output_shape)


class Gather_nd(RunAll):
    @staticmethod
    def gather_nd_fp16x16():
        def gather_nd_3D():
            def default():
                x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 3)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=0)
                x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "gather_nd_fp16x16_3d_default"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
                    name=name)

            def batch_dims1():
                x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=1)
                x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "gather_nd_fp16x16_3d_batch_dims1"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
                    name=name)

            def batch_dims2():
                x1 = np.arange(0, 54).reshape(3, 3, 3, 2).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=2)
                x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "gather_nd_fp16x16_3d_batch_dims2"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
                    name=name)

            default()
            batch_dims1()
            batch_dims2()

        gather_nd_3D()

    @staticmethod
    def gather_nd_fp8x23():
        def gather_nd_3D():
            def default():
                x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 3)).astype(np.int64)
                y = gather_nd_impl(x1, x2, batch_dims=0)
                x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "gather_nd_fp8x23_3d_default"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
                    name=name)

            def batch_dims1():
                x1 = np.arange(0, 27).reshape(3, 3, 3).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.int64)
                y = gather_nd_impl(x1, x2, batch_dims=1)
                x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "gather_nd_fp8x23_3d_batch_dims1"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
                    name=name)

            def batch_dims2():
                x1 = np.arange(0, 54).reshape(3, 3, 3, 2).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=2)
                x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "gather_nd_fp8x23_3d_batch_dims2"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
                    name=name)

            default()
            batch_dims1()
            batch_dims2()

        gather_nd_3D()

    @staticmethod
    def gather_nd_i8():
        def gather_nd_3D():
            def default():
                x1 = np.arange(0, 9).reshape(3, 3).astype(np.int8)
                x2 = np.random.randint(low=0, high=2, size=(3, 2)).astype(np.int8)
                y = gather_nd_impl(x1, x2, batch_dims=0)
                x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "gather_nd_i8_3d_default"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
                    name=name)

            def batch_dims1():
                x1 = np.arange(0, 9).reshape(3, 3).astype(np.int8)
                x2 = np.random.randint(low=0, high=2, size=(3, 1)).astype(np.int8)
                y = gather_nd_impl(x1, x2, batch_dims=1)
                x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "gather_nd_i8_3d_batch_dims1"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
                    name=name)

            default()
            batch_dims1()

        gather_nd_3D()

    @staticmethod
    def gather_nd_i32():
        def gather_nd_3D():
            def default():
                x1 = np.arange(0, 24).reshape(4, 2, 3).astype(np.int32)
                x2 = np.random.randint(low=0, high=2, size=(3, 2)).astype(np.int32)
                y = gather_nd_impl(x1, x2, batch_dims=0)
                x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "gather_nd_i32_3d_default"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
                    name=name)

            def batch_dims1():
                x1 = np.arange(0, 108).reshape(4, 3, 3, 3).astype(np.int32)
                x2 = np.random.randint(low=0, high=3, size=(4, 2, 3)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=1)
                x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "gather_nd_i32_3d_batch_dims1"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
                    name=name)

            def batch_dims2():
                x1 = np.arange(0, 54).reshape(3, 3, 3, 2).astype(np.int64)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=2)
                x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "gather_nd_i32_3d_batch_dims2"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
                    name=name)

            default()
            batch_dims1()
            batch_dims2()

        gather_nd_3D()

    @staticmethod
    def gather_nd_u32():
        def gather_nd_3D():
            def default():
                x1 = np.arange(0, 108).reshape(3, 3, 4, 3).astype(np.int32)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=0)
                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "gather_nd_u32_default"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0))",
                    name=name)

            def batch_dims1():
                x1 = np.arange(0, 108).reshape(3, 3, 4, 3).astype(np.int32)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=1)
                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "gather_nd_u32_batch_dims1"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1))",
                    name=name)

            def batch_dims2():
                x1 = np.arange(0, 108).reshape(3, 3, 4, 3).astype(np.int32)
                x2 = np.random.randint(low=0, high=2, size=(3, 3, 2)).astype(np.uint32)
                y = gather_nd_impl(x1, x2, batch_dims=2)
                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "gather_nd_u32_batch_dims2"
                make_test(
                    inputs=[x1, x2], output=y,
                    func_sig="input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2))",
                    name=name)

            default()
            batch_dims1()
            batch_dims2()

        gather_nd_3D()
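A tiny worked example (assumed, reusing `gather_nd_impl` from above): with batch_dims=0 and indices whose last dimension equals the data rank, each index row picks out one scalar.

# Each innermost index vector is a full coordinate into `data`.
import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 1]])          # two full coordinates
out = gather_nd_impl(data, indices, batch_dims=0)
assert (out == [1, 4]).all()                  # data[0, 0] and data[1, 1]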
https://github.com/gizatechxyz/orion
nodegen/node/gemm.py
from typing import Optional

import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def gemm_reference_implementation(
    A: np.ndarray,
    B: np.ndarray,
    C: Optional[np.ndarray] = None,
    alpha: float = 1.0,
    beta: float = 1.0,
    transA: int = 0,
    transB: int = 0,
) -> np.ndarray:
    A = A if transA == 0 else A.T
    B = B if transB == 0 else B.T
    C = C if C is not None else np.array(0)

    Y = alpha * np.dot(A, B) + beta * C

    return Y


class Gemm(RunAll):
    @staticmethod
    def gemm_default_zero_bias():
        a = np.random.ranf([3, 5]).astype(np.float32)
        b = np.random.ranf([5, 4]).astype(np.float32)
        c = np.zeros([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_default_no_bias"
        make_test(
            [a, b], y,
            "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_default_vector_bias():
        a = np.random.ranf([2, 7]).astype(np.float32)
        b = np.random.ranf([7, 4]).astype(np.float32)
        c = np.random.ranf([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        c = Tensor(Dtype.FP16x16, c.shape, to_fp(c.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_default_vector_bias"
        make_test(
            [a, b, c], y,
            "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_default_matrix_bias():
        a = np.random.ranf([3, 6]).astype(np.float32)
        b = np.random.ranf([6, 4]).astype(np.float32)
        c = np.random.ranf([3, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        c = Tensor(Dtype.FP16x16, c.shape, to_fp(c.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_default_matrix_bias"
        make_test(
            [a, b, c], y,
            "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_transposeA():
        a = np.random.ranf([6, 3]).astype(np.float32)
        b = np.random.ranf([6, 4]).astype(np.float32)
        c = np.zeros([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c, transA=1)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_transposeA"
        make_test(
            [a, b], y,
            "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_transposeB():
        a = np.random.ranf([3, 6]).astype(np.float32)
        b = np.random.ranf([4, 6]).astype(np.float32)
        c = np.zeros([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c, transB=1)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_transposeB"
        make_test(
            [a, b], y,
            "NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true)",
            name, Trait.NN)

    @staticmethod
    def gemm_alpha():
        a = np.random.ranf([3, 5]).astype(np.float32)
        b = np.random.ranf([5, 4]).astype(np.float32)
        c = np.zeros([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c, alpha=0.5)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_alpha"
        make_test(
            [a, b], y,
            "NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_beta():
        a = np.random.ranf([2, 7]).astype(np.float32)
        b = np.random.ranf([7, 4]).astype(np.float32)
        c = np.random.ranf([1, 4]).astype(np.float32)
        y = gemm_reference_implementation(a, b, c, beta=0.5)
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        c = Tensor(Dtype.FP16x16, c.shape, to_fp(c.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_beta"
        make_test(
            [a, b, c], y,
            "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false)",
            name, Trait.NN)

    @staticmethod
    def gemm_all_attributes():
        a = np.random.ranf([4, 3]).astype(np.float32)
        b = np.random.ranf([5, 4]).astype(np.float32)
        c = np.random.ranf([1, 5]).astype(np.float32)
        y = gemm_reference_implementation(
            a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35
        )
        a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        c = Tensor(Dtype.FP16x16, c.shape, to_fp(c.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        name = "gemm_all_attributes"
        make_test(
            [a, b, c], y,
            "NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true)",
            name, Trait.NN)
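Note the fixed-point literals in the Cairo signatures: in FP16x16, 32768/2^16 = 0.5, 16384/2^16 = 0.25 and 22938/2^16 ≈ 0.35, matching the alpha/beta values fed to the reference. A tiny worked example (assumed) of the reference itself, Y = alpha * A' @ B' + beta * C:

# Reuses gemm_reference_implementation defined above.
import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.eye(2)                                  # identity, so A' @ B' == A
C = np.ones((2, 2))
Y = gemm_reference_implementation(A, B, C, alpha=0.5, beta=2.0)
assert (Y == np.array([[2.5, 3.0], [3.5, 4.0]])).all()  # 0.5*A + 2*ones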
https://github.com/gizatechxyz/orion
nodegen/node/greater.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Greater(RunAll):
    @staticmethod
    def greater_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.greater(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_u32"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.greater(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_u32_broadcast"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = np.greater(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_i32"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
            z = np.greater(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_i32_broadcast"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = np.greater(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_i8"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
            z = np.greater(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_i8_broadcast"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.greater(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_fp8x23"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.greater(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_fp8x23_broadcast"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.greater(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_fp16x16"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.greater(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_fp16x16_broadcast"
            make_test([x, y], z, "input_0.greater(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/greater_equal.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Greater_equal(RunAll):
    @staticmethod
    def greater_equal_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_u32"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 3, 1)).astype(np.uint32)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_u32_broadcast"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_equal_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_i32"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_i32_broadcast"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_equal_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_i8"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_i8_broadcast"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_equal_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_fp8x23"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_fp8x23_broadcast"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def greater_equal_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_fp16x16"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = np.greater_equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "greater_equal_fp16x16_broadcast"
            make_test([x, y], z, "input_0.greater_equal(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/grid_sample.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
from .resize import _get_all_coords
import numbers
from typing import List

# from onnx.reference.ops.op_resize import _get_all_coords


def grid_sample(X, grid, mode='linear', padding_mode='zeros', align_corners=0):
    x_dims = X.shape
    grid_dims = grid.shape
    N = x_dims[0]
    C = x_dims[1]
    y_dims = (N, C, *grid_dims[1:-1])
    if np.prod(y_dims) == 0:
        return np.array([], dtype=X.dtype)
    Y = np.empty(y_dims, dtype=X.dtype)
    for n in range(N):
        grid_data = grid[n]
        for c in range(C):
            X_data = X[n, c]
            num_dims = len(x_dims[2:])
            dims = x_dims[2:]
            border = _prepare_border(dims, align_corners=align_corners)
            for ox in _get_all_coords(Y[n, c]):
                nx = grid_data[tuple(ox)]
                nx = nx[::-1]
                x = _gs_denormalize_coordinates(
                    n=nx, dims=dims, align_corners=align_corners
                )
                if mode == "nearest":
                    x = np.rint(x)
                for i, v in enumerate(x):
                    x_min = border[i]
                    x_max = border[i + num_dims]
                    if v < x_min or v > x_max:
                        if padding_mode == "border":
                            x[i] = _clamp(v, 0, dims[i] - 1)
                        elif padding_mode == "reflection":
                            x[i] = _gs_reflect(v, x_min, x_max)
                if mode == "nearest":
                    x = x.astype(np.int32)
                    Y[n][c][tuple(ox)] = _pixel_at_ndarray(
                        ndarray=X_data, x=x, border=border, padding_mode=padding_mode,
                    )
                elif mode == "linear":
                    Y[n][c][tuple(ox)] = _gs_linear_interpolation_nd_with_x(
                        data=X_data, x=x, border=border, padding_mode=padding_mode
                    )
                elif mode == "cubic":
                    Y[n][c][tuple(ox)] = _gs_cubic_interpolation_nd_with_x(
                        data=X_data, x=x, border=border, padding_mode=padding_mode
                    )
                else:
                    raise RuntimeError(
                        "GridSample interpolation only supports nearest, linear, and cubic modes."
                    )
    return (Y.astype(X.dtype),)


def _gs_denormalize(n, length: int, align_corners: bool):
    if align_corners:
        x = (n + 1) / 2.0 * (length - 1)
    else:
        x = ((n + 1) * length - 1) / 2.0
    return x


def _gs_denormalize_coordinates(n, dims, align_corners: bool):
    x = np.zeros(len(n), dtype=np.float32)
    for i, (v, dim) in enumerate(zip(n, dims)):
        x[i] = _gs_denormalize(n=v, length=dim, align_corners=align_corners)
    return x


def _gs_reflect(x, x_min, x_max):  # type: ignore
    """Reflect by the near border until within the borders.

    Use float for borders to avoid potential issues with integer T.
    """
    fx = x
    rng = x_max - x_min
    if fx < x_min:
        dx = x_min - fx
        n = int(dx / rng)
        r = dx - n * rng
        if n % 2 == 0:
            fx = x_min + r
        else:
            fx = x_max - r
    elif fx > x_max:
        dx = fx - x_max
        n = int(dx / rng)
        r = dx - n * rng
        if n % 2 == 0:
            fx = x_max - r
        else:
            fx = x_min + r
    return fx


def _gs_get_cubic_coeffs(x, coeffs):  # type: ignore
    """Calculate cubic convolution interpolation coefficients.

    ROBERT G. KEYS, https://ieeexplore.ieee.org/document/1163711
    Use float to avoid potential issues with integer.
    """
    cubic_alpha = -0.75
    x = abs(x)
    coeffs[0] = (
        (cubic_alpha * (x + 1) - 5 * cubic_alpha) * (x + 1) + 8 * cubic_alpha
    ) * (x + 1) - 4 * cubic_alpha
    coeffs[1] = ((cubic_alpha + 2) * x - (cubic_alpha + 3)) * x * x + 1
    coeffs[2] = ((cubic_alpha + 2) * (1 - x) - (cubic_alpha + 3)) * (1 - x) * (
        1 - x
    ) + 1
    coeffs[3] = (
        (cubic_alpha * (2 - x) - 5 * cubic_alpha) * (2 - x) + 8 * cubic_alpha
    ) * (2 - x) - 4 * cubic_alpha


def _gs_get_linear_coeffs(x, coeffs):
    x = abs(x)
    coeffs[0] = 1 - x
    coeffs[1] = x


def _gs_bicubic_interpolate(p, x, y):  # type: ignore
    v = np.empty((4,), dtype=p.dtype)
    coeffs = np.empty((4,), dtype=p.dtype)
    _gs_get_cubic_coeffs(x, coeffs)
    for i in range(4):
        v[i] = coeffs @ p[i, :]
    _gs_get_cubic_coeffs(y, coeffs)
    return coeffs @ v


def _gs_cubic_interpolation_1d_with_x(data, x, border, padding_mode):
    v = np.empty((4,), dtype=data.dtype)
    coeffs = np.empty((4,), dtype=data.dtype)
    x_0 = int(np.floor(x))
    x_1 = x_0 + 1
    x_2 = x_0 + 2
    x_minus_1 = x_0 - 1
    _gs_get_cubic_coeffs(x - x_0, coeffs)
    v[0] = _pixel_at_array(array=data, i=x_minus_1, border=border, padding_mode=padding_mode)
    v[1] = _pixel_at_array(array=data, i=x_0, border=border, padding_mode=padding_mode)
    v[2] = _pixel_at_array(array=data, i=x_1, border=border, padding_mode=padding_mode)
    v[3] = _pixel_at_array(array=data, i=x_2, border=border, padding_mode=padding_mode)
    return coeffs @ v


def _gs_linear_interpolation_1d_with_x(data, x, border, padding_mode):
    v = np.empty((2,), dtype=data.dtype)
    coeffs = np.empty((2,), dtype=data.dtype)
    x_0 = int(np.floor(x))
    x_1 = x_0 + 1
    _gs_get_linear_coeffs(x - x_0, coeffs)
    v[0] = _pixel_at_array(array=data, i=x_0, border=border, padding_mode=padding_mode)
    v[1] = _pixel_at_array(array=data, i=x_1, border=border, padding_mode=padding_mode)
    return coeffs @ v


def _gs_linear_interpolation_nd_with_x(data, x, border, padding_mode):
    num_dims = data.ndim
    assert num_dims == len(x) == int(len(border) / 2)
    if num_dims == 1:
        return _gs_linear_interpolation_1d_with_x(
            data=data, x=x[0], border=border, padding_mode=padding_mode
        )
    res1d = []
    for i in range(data.shape[0]):
        r = _gs_linear_interpolation_nd_with_x(
            data=data[i],
            x=x[1:],
            border=list(border[1:num_dims]) + list(border[1 + num_dims: 2 * num_dims]),
            padding_mode=padding_mode,
        )
        res1d.append(r)
    res1d = np.array(res1d)
    return _gs_linear_interpolation_1d_with_x(
        data=res1d,
        x=x[0],
        border=[border[0], border[num_dims]],
        padding_mode=padding_mode,
    )


def _gs_cubic_interpolation_nd_with_x(data, x, border, padding_mode):
    num_dims = data.ndim
    assert num_dims == len(x) == int(len(border) / 2)
    if num_dims == 1:
        return _gs_cubic_interpolation_1d_with_x(
            data=data, x=x[0], border=border, padding_mode=padding_mode
        )
    res1d = []
    for i in range(data.shape[0]):
        r = _gs_cubic_interpolation_nd_with_x(
            data=data[i],
            x=x[1:],
            border=list(border[1:num_dims]) + list(border[1 + num_dims: 2 * num_dims]),
            padding_mode=padding_mode,
        )
        res1d.append(r)
    res1d = np.array(res1d)
    return _gs_cubic_interpolation_1d_with_x(
        data=res1d,
        x=x[0],
        border=[border[0], border[num_dims]],
        padding_mode=padding_mode,
    )


def _clamp(val, lo, hi):  # type: ignore
    if val < lo:
        return lo
    if val > hi:
        return hi
    return val


def _pixel_at_ndarray(ndarray, x: List, border, padding_mode):  # type: ignore
    # border: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...]
    num_dims = ndarray.ndim
    assert num_dims == len(x) == int(len(border) / 2)
    if num_dims == 1:
        return _pixel_at_array(
            array=ndarray, i=x[0], border=border, padding_mode=padding_mode
        )
    i = x[0]
    d = ndarray.shape[0]
    if padding_mode == "zeros":
        if i >= 0 and i < d:
            ndarray = ndarray[i]
        else:
            # Trick: index 0 only to get a correctly-shaped slice of zeros.
            i = 0
            ndarray = np.zeros_like(ndarray[i])
    elif padding_mode == "border":
        i = _clamp(i, 0, d - 1)
        ndarray = ndarray[i]
    else:
        i = int(_gs_reflect(i, border[0], border[num_dims]))
        ndarray = ndarray[i]
    return _pixel_at_ndarray(
        ndarray=ndarray,
        x=x[1:],
        border=list(border[1:num_dims]) + list(border[1 + num_dims: 2 * num_dims]),
        padding_mode=padding_mode,
    )


def _pixel_at_array(array, i: int, border, padding_mode):  # type: ignore
    assert array.ndim == 1
    d = array.shape[0]
    if padding_mode == "zeros":
        if i >= 0 and i < d:
            pixel = array[i]
        else:
            pixel = 0
    elif padding_mode == "border":
        i = _clamp(i, 0, d - 1)
        pixel = array[i]
    else:
        i = int(_gs_reflect(i, border[0], border[1]))
        pixel = array[i]
    return pixel


def _prepare_border(dims, align_corners: bool):
    # border: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...]
    num_dims = len(dims)
    borders = np.zeros(num_dims * 2)
    for i in range(num_dims):
        # min
        borders[i] = -0.5
        # max
        borders[i + num_dims] = dims[i] - 0.5
        if align_corners:
            # min
            borders[i] = 0.0
            # max
            borders[i + num_dims] = dims[i] - 1.0
    return borders


class Grid_sample(RunAll):
    @staticmethod
    def export_gridsample() -> None:
        x = np.array(
            [[[
                [0.0, 1.0, 2.0, 3.0],
                [4.0, 5.0, 6.0, 7.0],
                [8.0, 9.0, 10.0, 11.0],
                [12.0, 13.0, 14.0, 15.0],
            ]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-1.0000, -1.0000], [-0.6000, -1.0000], [-0.2000, -1.0000], [0.2000, -1.0000], [0.6000, -1.0000], [1.0000, -1.0000]],
                [[-1.0000, -0.6000], [-0.6000, -0.6000], [-0.2000, -0.6000], [0.2000, -0.6000], [0.6000, -0.6000], [1.0000, -0.6000]],
                [[-1.0000, -0.2000], [-0.6000, -0.2000], [-0.2000, -0.2000], [0.2000, -0.2000], [0.6000, -0.2000], [1.0000, -0.2000]],
                [[-1.0000, 0.2000], [-0.6000, 0.2000], [-0.2000, 0.2000], [0.2000, 0.2000], [0.6000, 0.2000], [1.0000, 0.2000]],
                [[-1.0000, 0.6000], [-0.6000, 0.6000], [-0.2000, 0.6000], [0.2000, 0.6000], [0.6000, 0.6000], [1.0000, 0.6000]],
                [[-1.0000, 1.0000], [-0.6000, 1.0000], [-0.2000, 1.0000], [0.2000, 1.0000], [0.6000, 1.0000], [1.0000, 1.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="linear")
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::None,"
        func_sig += "Option::None,"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_paddingmode_zeros() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-10.0000, -10.0000], [-5.0000, -5.0000], [-0.2000, -0.2000], [10.0000, 10.0000]],
                [[10.0000, 10.0000], [-0.2000, -0.2000], [5.0000, 5.0000], [10.0000, 10.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="linear")
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_padding_zeros"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::None,"
        func_sig += "Option::None,"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_paddingmode_border() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-10.0000, -10.0000], [-5.0000, -5.0000], [-0.2000, -0.2000], [10.0000, 10.0000]],
                [[10.0000, 10.0000], [-0.2000, -0.2000], [5.0000, 5.0000], [10.0000, 10.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="linear", padding_mode="border")
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_padding_border"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::None,"
        func_sig += "Option::None,"
        func_sig += "Option::Some(PADDING_MODE::BORDER))"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_paddingmode_reflection() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-10.0000, -10.0000], [-5.0000, -5.0000], [-0.2000, -0.2000], [10.0000, 10.0000]],
                [[10.0000, 10.0000], [-0.2000, -0.2000], [5.0000, 5.0000], [10.0000, 10.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="linear", padding_mode="reflection")
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_padding_reflection"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::None,"
        func_sig += "Option::None,"
        func_sig += "Option::Some(PADDING_MODE::REFLECTION))"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_mode_aligncorners() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-1.0000, -1.0000], [-0.5000, -0.5000], [-0.2000, -0.2000], [0.0000, 0.0000]],
                [[0.0000, 0.0000], [-0.2000, -0.2000], [0.5000, 0.5000], [1.0000, 1.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="linear", align_corners=1)
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_aligncorners"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::Some(1),"
        func_sig += "Option::None,"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_nearest() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-1.0000, -1.0000], [-0.5000, -0.5000], [-0.2000, -0.2000], [0.0000, 0.0000]],
                [[0.0000, 0.0000], [-0.2000, -0.2000], [0.5000, 0.5000], [1.0000, 1.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="nearest", align_corners=0)
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_nearest"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::Some(0),"
        func_sig += "Option::Some(MODE::NEAREST),"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_nearest_align_corner() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-1.0000, -1.0000], [-0.5000, -0.5000], [-0.2000, -0.2000], [0.0000, 0.0000]],
                [[0.0000, 0.0000], [-0.2000, -0.2000], [0.5000, 0.5000], [1.0000, 1.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="nearest", align_corners=1)
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_nearest_aligncorner"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::Some(1),"
        func_sig += "Option::Some(MODE::NEAREST),"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)

    @staticmethod
    def export_gridsample_cubic() -> None:
        x = np.array(
            [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]],
            dtype=np.float32,
        )
        grid = np.array(
            [[
                [[-1.0000, -1.0000], [-0.5000, -0.5000], [-0.2000, -0.2000], [0.0000, 0.0000]],
                [[0.0000, 0.0000], [-0.2000, -0.2000], [0.5000, 0.5000], [1.0000, 1.0000]],
            ]],
            dtype=np.float32,
        )
        y = grid_sample(x, grid, mode="cubic", align_corners=0)
        y = np.array(y[0])

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "grid_sample_cubic"
        func_sig = "NNTrait::grid_sample("
        func_sig += "@input_0,"
        func_sig += "@input_1,"
        func_sig += "Option::Some(0),"
        func_sig += "Option::Some(MODE::CUBIC),"
        func_sig += "Option::None)"
        make_test([x, grid], y, func_sig, name, Trait.NN)
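A quick check (assumed example, reusing `_gs_denormalize` from the file above) of the coordinate mapping that drives all of these fixtures: grid values in [-1, 1] land on pixel centers when align_corners=0 and exactly on the corner pixels when align_corners=1.

# For a length-4 axis: align_corners maps the grid edges onto pixels 0 and 3;
# otherwise the edges fall half a pixel outside the first and last centers.
assert _gs_denormalize(-1.0, 4, align_corners=True) == 0.0
assert _gs_denormalize(1.0, 4, align_corners=True) == 3.0
assert _gs_denormalize(-1.0, 4, align_corners=False) == -0.5
assert _gs_denormalize(1.0, 4, align_corners=False) == 3.5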
https://github.com/gizatechxyz/orion
nodegen/node/hamming_window.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement


def hamming_window(size, output_datatype=None, periodic=None) -> np.ndarray:  # type: ignore
    if periodic == 1:
        N_1 = size
    else:
        N_1 = size - 1
    ni = np.arange(size, dtype=output_datatype)
    alpha = 25.0 / 46.0
    beta = 1 - alpha
    res = alpha - np.cos(ni * np.float64(np.pi).astype(output_datatype) * 2 / N_1).astype(output_datatype) * beta
    return res.astype(output_datatype)


class Hamming_window(RunAll):
    @staticmethod
    # We test here with the fp8x23 implementation.
    def fp8x23():
        args = [4]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
        y = hamming_window(*args, np.float64)
        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
        # Define the name of the generated folder.
        name = "hamming_window_fp8x23"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the fp16x16 implementation.
    def fp16x16():
        args = [10]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
        y = hamming_window(*args, np.float16)
        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        # Define the name of the generated folder.
        name = "hamming_window_fp16x16"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
            name,  # The name of the generated folder.
        )

    # @staticmethod
    # # We test here with the i8 implementation.
    # def i8():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8))
    #     args = [5]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.I8)
    #     y = hamming_window(*args, np.int8)
    #     print(y)
    #     y = Tensor(Dtype.I8, y.shape, y.flatten())
    #     name = "hamming_window_i8"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(1))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )

    # @staticmethod
    # # We test here with the i32 implementation.
    # def i32():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32))
    #     args = [4]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.I32)
    #     y = hamming_window(*args, np.int32)
    #     print(y)
    #     y = Tensor(Dtype.I32, y.shape, y.flatten())
    #     name = "hamming_window_i32"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )

    # @staticmethod
    # # We test here with the u32 implementation.
    # def u32():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32))
    #     args = [4]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.U32)
    #     y = hamming_window(*args, np.uint32)
    #     print(y)
    #     y = Tensor(Dtype.U32, y.shape, y.flatten())
    #     name = "hamming_window_u32"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )
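A quick sanity check (mine, not part of the generator) of the reference implementation above: with size=4 and periodic unset, N_1 = 3 and alpha = 25/46, so

# hamming_window(4, np.float64) evaluates to approximately
# [0.08695652, 0.77173913, 0.77173913, 0.08695652]
# (alpha - beta at the endpoints, alpha + beta/2 where cos(2*pi*n/3) = -0.5).
print(hamming_window(4, np.float64))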
https://github.com/gizatechxyz/orion
nodegen/node/hann_window.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement


def hann_window(size, output_datatype=None, periodic=None) -> np.ndarray:  # type: ignore
    if periodic == 1:
        N_1 = size
    else:
        N_1 = size - 1
    ni = np.arange(size, dtype=output_datatype)
    res = np.sin((ni * np.float64(np.pi).astype(output_datatype) / N_1).astype(output_datatype)) ** 2
    return res.astype(output_datatype)


class Hann_window(RunAll):
    @staticmethod
    # We test here with the fp8x23 implementation.
    def fp8x23():
        print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP8x23), Dtype.FP8x23))
        args = [4]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
        y = hann_window(*args, np.float64)
        print(y)
        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
        # Define the name of the generated folder.
        name = "hann_window_fp8x23"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the fp16x16 implementation.
    def fp16x16():
        print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16))
        args = [10]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
        y = hann_window(*args, np.float16)
        print(y)
        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
        # Define the name of the generated folder.
        name = "hann_window_fp16x16"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
            name,  # The name of the generated folder.
        )

    # @staticmethod
    # # We test here with the i8 implementation.
    # def i8():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8))
    #     args = [5]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.I8)
    #     y = hann_window(*args, np.int8)
    #     print(y)
    #     y = Tensor(Dtype.I8, y.shape, y.flatten())
    #     name = "hann_window_i8"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(1))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )

    # @staticmethod
    # # We test here with the i32 implementation.
    # def i32():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32))
    #     args = [4]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.I32)
    #     y = hann_window(*args, np.int32)
    #     print(y)
    #     y = Tensor(Dtype.I32, y.shape, y.flatten())
    #     name = "hann_window_i32"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )

    # @staticmethod
    # # We test here with the u32 implementation.
    # def u32():
    #     print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32))
    #     args = [4]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.U32)
    #     y = hann_window(*args, np.uint32)
    #     print(y)
    #     y = Tensor(Dtype.U32, y.shape, y.flatten())
    #     name = "hann_window_u32"
    #     make_test(
    #         [],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))",  # The code signature.
    #         name,  # The name of the generated folder.
    #     )
https://github.com/gizatechxyz/orion
nodegen/node/hard_sigmoid.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Hard_sigmoid(RunAll):
    @staticmethod
    def fp8x23():
        alpha = 0.2
        beta = 0.5
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
        y = np.maximum(0, np.minimum(1, alpha * x + beta))

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "hard_sigmoid_fp8x23"
        make_test([x], y, "NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false))", name, Trait.NN)

    @staticmethod
    def fp16x16():
        alpha = 0.2
        beta = 0.5
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
        y = np.maximum(0, np.minimum(1, alpha * x + beta))

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "hard_sigmoid_fp16x16"
        make_test([x], y, "NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false))", name, Trait.NN)
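An illustrative derivation (mine, not part of the generator) of the fixed-point constants hard-coded in the signatures above: alpha = 0.2 and beta = 0.5 are scaled by the fractional bit count of each format and truncated.

assert int(0.2 * 2**23) == 1677721 and int(0.5 * 2**23) == 4194304  # FP8x23
assert int(0.2 * 2**16) == 13107 and int(0.5 * 2**16) == 32768      # FP16x16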
https://github.com/gizatechxyz/orion
nodegen/node/identity.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Identity(RunAll):
    @staticmethod
    def identity_fP8x23():
        def identity():
            x = np.array([[1, 2], [3, 4]])
            y = x
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "identity_fP8x23"
            make_test([x], y, "input_0.identity()", name)
        identity()

    @staticmethod
    def identity_fP16x16():
        def identity():
            x = np.array([[1, 2], [3, 4]])
            y = x
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "identity_fP16x16"
            make_test([x], y, "input_0.identity()", name)
        identity()

    @staticmethod
    def identity_i8():
        def identity():
            x = np.array([[1, 2], [3, 4]])
            y = x
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "identity_i8"
            make_test([x], y, "input_0.identity()", name)
        identity()

    @staticmethod
    def identity_i32():
        def identity():
            x = np.array([[1, 2], [3, 4]])
            y = x
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "identity_i32"
            make_test([x], y, "input_0.identity()", name)
        identity()

    @staticmethod
    def identity_u32():
        def identity():
            x = np.array([[1, 2], [3, 4]])
            y = x
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "identity_u32"
            make_test([x], y, "input_0.identity()", name)
        identity()
https://github.com/gizatechxyz/orion
nodegen/node/is_inf.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl

INF = 2**32 - 1


class Is_inf(RunAll):
    @staticmethod
    def is_inf_u32():
        def default():
            input_0 = np.array([1, 0, INF, 8, -INF, INF], dtype=np.uint32)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.U32, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_inf_u32"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)
        default()

    @staticmethod
    def is_inf_i32():
        def default():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_inf_i32"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)

        def positive():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
            output = np.array([False, False, True, False, False, True], dtype=bool)
            input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_pos_inf_i32"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)

        def negative():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
            output = np.array([False, False, False, False, True, False], dtype=bool)
            input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_neg_inf_i32"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)

        default()
        positive()
        negative()

    @staticmethod
    def is_inf_i8():
        def default():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int8)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_inf_i8"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)

        def positive():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
            output = np.array([False, False, True, False, False, True], dtype=bool)
            input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_pos_inf_i8"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)

        def negative():
            input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32)
            output = np.array([False, False, False, False, True, False], dtype=bool)
            input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_neg_inf_i8"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)

        default()
        positive()
        negative()

    @staticmethod
    def is_inf_fp8x23():
        def default():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp(input_0.flatten(), FixedImpl.FP8x23))
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_inf_fp8x23"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)

        def positive():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, True, False, False, True], dtype=bool)
            input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_pos_inf_fp8x23"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)

        def negative():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, False, False, True, False], dtype=bool)
            input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_neg_inf_fp8x23"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)

        default()
        positive()
        negative()

    @staticmethod
    def is_inf_fp16x16():
        def default():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp(input_0.flatten(), FixedImpl.FP16x16))
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_inf_fp16x16"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name)

        def positive():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, True, False, False, True], dtype=bool)
            input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_pos_inf_fp16x16"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name)

        def negative():
            input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64)
            output = np.array([False, False, False, False, True, False], dtype=bool)
            input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten())
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_neg_inf_fp16x16"
            make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name)

        default()
        positive()
        negative()
https://github.com/gizatechxyz/orion
nodegen/node/is_nan.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl

# NaN is represented with -0
NaN = -0


class Is_nan(RunAll):
    @staticmethod
    def is_nan_fp8x23():
        def default():
            input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp(input_0.flatten(), FixedImpl.FP8x23))
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_nan_fp8x23"
            make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name)
        default()

    @staticmethod
    def is_nan_fp16x16():
        def default():
            input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64)
            output = np.array([False, False, True, False, True, True], dtype=bool)
            input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp(input_0.flatten(), FixedImpl.FP16x16))
            output = Tensor(Dtype.BOOL, output.shape, output.flatten())
            name = "is_nan_fp16x16"
            make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name)
        default()
https://github.com/gizatechxyz/orion
nodegen/node/label_encoder.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait

# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=R0913,R0914,W0221


def labelEncoder(  # type: ignore
    x,
    default_float=None,
    default_int64=None,
    default_string=None,
    keys_floats=None,
    keys_int64s=None,
    keys_strings=None,
    values_floats=None,
    values_int64s=None,
    values_strings=None,
):
    keys = keys_floats if keys_floats is not None else (keys_int64s if np.any(keys_int64s) else keys_strings)
    values = values_floats if values_floats is not None else (values_int64s if np.any(values_int64s) else values_strings)
    classes = dict(zip(keys, values))
    if id(keys) == id(keys_floats):
        cast = float
    elif id(keys) == id(keys_int64s):
        cast = int  # type: ignore
    else:
        cast = str  # type: ignore
    if id(values) == id(values_floats):
        defval = default_float
        dtype = np.float32
    elif id(values) == id(values_int64s):
        defval = default_int64
        dtype = np.int64  # type: ignore
    else:
        defval = default_string
        if not isinstance(defval, str):
            defval = ""
        dtype = np.str_  # type: ignore
    shape = x.shape
    if len(x.shape) > 1:
        x = x.flatten()
    res = []
    for i in range(0, x.shape[0]):
        v = classes.get(cast(x[i]), defval)
        res.append(v)
    return np.array(res, dtype=dtype).reshape(shape)


class Label_encoder(RunAll):
    @staticmethod
    def label_encoder_fp16x16():
        def labelencoder():
            def default():
                x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3]).astype(np.int64)
                keys = np.array([1, 2, 5, 6]).astype(np.int64)
                values = np.array([11, 22, 55, 66]).astype(np.int64)
                default = np.array(99).astype(np.int64)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)

                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                default = Tensor(Dtype.FP16x16, default.shape, to_fp(default.flatten(), FixedImpl.FP16x16))
                keys = Tensor(Dtype.FP16x16, keys.shape, to_fp(keys.flatten(), FixedImpl.FP16x16))
                values = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

                name = "label_encoder_fp16x16_3d_default"
                make_test(
                    inputs=[x, default, keys, values],
                    output=y,
                    func_sig="""input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), keys:Option::None, keys_tensor: Option::Some(input_2), values: Option::None, values_tensor: Option::Some(input_3))""",
                    name=name,
                )
            default()
        labelencoder()

    @staticmethod
    def label_encoder_fp8x23():
        def label_encoder():
            def default():
                x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int64)
                keys = np.array([1, 2, 5, 6, 7]).astype(np.int64)
                values = np.array([11, 22, 55, 66, 77]).astype(np.int64)
                default = np.array(99).astype(np.int64)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)

                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                default = Tensor(Dtype.FP8x23, default.shape, to_fp(default.flatten(), FixedImpl.FP8x23))
                keys = Tensor(Dtype.FP8x23, keys.shape, to_fp(keys.flatten(), FixedImpl.FP8x23))
                values = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

                name = "label_encoder_fp8x23_default"
                make_test(
                    inputs=[x, default, keys, values],
                    output=y,
                    func_sig="""input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), keys:Option::None, keys_tensor: Option::Some(input_2), values: Option::None, values_tensor: Option::Some(input_3))""",
                    name=name,
                )
            default()
        label_encoder()

    @staticmethod
    def label_encoder_i8():
        def label_encoder_3D():
            def default():
                x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int8)
                keys = np.array([1, 2, 5, 6, 7]).astype(np.int8)
                values = np.array([11, 22, 55, 66, 77]).astype(np.int8)
                default = np.array(99).astype(np.int8)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)

                x = Tensor(Dtype.I8, x.shape, x.flatten())
                default = Tensor(Dtype.I8, default.shape, default.flatten())
                keys = Tensor(Dtype.I8, keys.shape, keys.flatten())
                values = Tensor(Dtype.I8, values.shape, values.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())

                name = "label_encoder_i8_default"
                make_test(
                    inputs=[x, default, keys, values],
                    output=y,
                    func_sig="""input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), keys:Option::None, keys_tensor: Option::Some(input_2), values: Option::None, values_tensor: Option::Some(input_3))""",
                    name=name,
                )
            default()
        label_encoder_3D()

    @staticmethod
    def label_encoder_i32():
        def label_encoder_3D():
            def default():
                x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.int32)
                keys = np.array([1, 2, 5, 6, 7]).astype(np.int32)
                values = np.array([11, 22, 55, 66, 77]).astype(np.int32)
                default = np.array(99).astype(np.int32)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)

                x = Tensor(Dtype.I32, x.shape, x.flatten())
                default = Tensor(Dtype.I32, default.shape, default.flatten())
                keys = Tensor(Dtype.I32, keys.shape, keys.flatten())
                values = Tensor(Dtype.I32, values.shape, values.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())

                name = "label_encoder_i32_default"
                make_test(
                    inputs=[x, default, keys, values],
                    output=y,
                    func_sig="""input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), keys:Option::None, keys_tensor: Option::Some(input_2), values: Option::None, values_tensor: Option::Some(input_3))""",
                    name=name,
                )
            default()
        label_encoder_3D()

    @staticmethod
    def label_encoder_u32():
        def label_encoder_3D():
            def default():
                x = np.array([1, 2, 3, 4, 5, 6, 1, 2, 3, 7, 8]).astype(np.uint32)
                keys = np.array([1, 2, 5, 6, 7]).astype(np.uint32)
                values = np.array([11, 22, 55, 66, 77]).astype(np.uint32)
                default = np.array(99).astype(np.uint32)
                y = labelEncoder(x=x, keys_int64s=keys, values_int64s=values, default_int64=default)

                x = Tensor(Dtype.U32, x.shape, x.flatten())
                default = Tensor(Dtype.U32, default.shape, default.flatten())
                keys = Tensor(Dtype.U32, keys.shape, keys.flatten())
                values = Tensor(Dtype.U32, values.shape, values.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "label_encoder_u32_default"
                make_test(
                    inputs=[x, default, keys, values],
                    output=y,
                    func_sig="""input_0.label_encoder(default_list:Option::None, default_tensor: Option::Some(input_1), keys:Option::None, keys_tensor: Option::Some(input_2), values: Option::None, values_tensor: Option::Some(input_3))""",
                    name=name,
                )
            default()
        label_encoder_3D()
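An illustrative run (mine, not part of the generator) of the reference labelEncoder above, showing that keys map to their paired values and unmatched entries fall back to the default:

out = labelEncoder(
    x=np.array([1, 2, 3, 4, 5, 6, 1, 2, 3]),
    keys_int64s=np.array([1, 2, 5, 6]),
    values_int64s=np.array([11, 22, 55, 66]),
    default_int64=99,
)
# out == [11, 22, 99, 99, 55, 66, 11, 22, 99]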
https://github.com/gizatechxyz/orion
nodegen/node/layer_normalization.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect


def _layer_normalization(X, W, B, axis=-1, epsilon=1e-5):
    X_shape = X.shape
    X_rank = len(X_shape)
    if axis < 0:
        axis = axis + X_rank
    unsqueezed_rank = X_rank - axis
    reduction_shape = X_shape[0:axis] + (1,) * unsqueezed_rank

    row_number = 1
    col_number = 1
    for i in range(X_rank):
        if i < axis:
            row_number *= X_shape[i]
        else:
            col_number *= X_shape[i]

    x_mat = np.reshape(X, (row_number, col_number))
    x_mean = np.sum(x_mat, axis=1, keepdims=True) / col_number
    x_diff = x_mat - x_mean
    x_squared_diff = x_diff * x_diff
    variance = np.sum(x_squared_diff, axis=1, keepdims=True) / col_number
    variance_eps = variance + epsilon
    std_dev = np.sqrt(variance_eps)
    inv_std_dev = np.reciprocal(std_dev)
    y_mat = x_diff * inv_std_dev
    Y = np.reshape(y_mat, X_shape) * W + B
    X_mean = np.reshape(x_mean, reduction_shape)
    X_inv_std_dev = np.reshape(inv_std_dev, reduction_shape)
    return Y, X_mean, X_inv_std_dev


def calculate_normalized_shape(X_shape, axis):
    X_rank = len(X_shape)
    if axis < 0:
        axis = axis + X_rank
    return X_shape[axis:]


class Layer_normalization(RunAll):
    @staticmethod
    def export4d() -> None:
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)

        def case(axis: int) -> None:
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis)

            if axis < 0:
                name = f"layer_normalization_4d_axis_negative_{-axis}"
                func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({-axis}),Option::None,Option::None)"
            else:
                name = f"layer_normalization_4d_axis{axis}"
                func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({axis}),Option::None,Option::None)"

            x = Tensor(Dtype.FP8x23, X.shape, to_fp(X.flatten(), FixedImpl.FP8x23))
            w = Tensor(Dtype.FP8x23, W.shape, to_fp(W.flatten(), FixedImpl.FP8x23))
            b = Tensor(Dtype.FP8x23, B.shape, to_fp(B.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, Y.shape, to_fp(Y.flatten(), FixedImpl.FP8x23))
            make_test([x, w, b], y, func_sig, name)

        for i in range(len(X.shape)):
            case(i)
            case(i - len(X.shape))

    @staticmethod
    def export_default_axis() -> None:
        X = np.random.randn(2, 3, 4, 5).astype(np.float32)
        normalized_shape = calculate_normalized_shape(X.shape, -1)
        W = np.random.randn(*normalized_shape).astype(np.float32)
        B = np.random.randn(*normalized_shape).astype(np.float32)
        Y, mean, inv_std_dev = _layer_normalization(X, W, B)

        x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
        w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))

        name = "layer_normalization_default_axis"
        make_test([x, w, b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)

    @staticmethod
    def export3d_epsilon() -> None:
        epsilon = 1e-1
        X = np.random.randn(2, 3, 5).astype(np.float32)

        def case(axis: int) -> None:
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon)

            if axis < 0:
                name = f"layer_normalization_3d_axis_negative_{-axis}_epsilon"
                func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({-axis}),Option::Some(FixedTrait::new(6554, false)),Option::None)"
            else:
                name = f"layer_normalization_3d_axis{axis}_epsilon"
                func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some({axis}),Option::Some(FixedTrait::new(6554, false)),Option::None)"

            x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
            w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
            make_test([x, w, b], y, func_sig, name)

        for i in range(len(X.shape)):
            case(i)
            case(i - len(X.shape))

    @staticmethod
    def test_2d_example() -> None:
        X = np.random.randn(3, 4).astype(np.float32)

        def case(axis: int) -> None:
            normalized_shape = calculate_normalized_shape(X.shape, axis)
            W = np.random.randn(*normalized_shape).astype(np.float32)
            B = np.random.randn(*normalized_shape).astype(np.float32)
            Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis)

            node = onnx.helper.make_node(
                "LayerNormalization",
                inputs=["X", "W", "B"],
                outputs=["Y", "Mean", "InvStdDev"],
                axis=axis,
            )

            x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
            w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))

            name = "layer_normalization_test"
            make_test([x, w, b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)

        case(-1)
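An illustrative cross-check (mine, not part of the generator): for the default last-axis case, _layer_normalization above should agree with a direct numpy formulation using the population variance.

X = np.random.randn(3, 4).astype(np.float32)
W = np.ones(4, dtype=np.float32)
B = np.zeros(4, dtype=np.float32)
Y, _, _ = _layer_normalization(X, W, B, axis=-1)
expected = (X - X.mean(axis=-1, keepdims=True)) / np.sqrt(X.var(axis=-1, keepdims=True) + 1e-5)
assert np.allclose(Y, expected, atol=1e-5)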
https://github.com/gizatechxyz/orion
nodegen/node/leaky_relu.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import tensorflow as tf


class Leaky_relu(RunAll):
    @staticmethod
    def leaky_relu_fp8x23():
        x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        layer = tf.keras.layers.LeakyReLU(alpha=0.1)
        y = layer(x).numpy()

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "leaky_relu_fp8x23"
        make_test([x], y, "NNTrait::leaky_relu(@input_0, @FixedTrait::new(838861, false))", name, Trait.NN)

    @staticmethod
    def leaky_relu_fp16x16():
        x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        layer = tf.keras.layers.LeakyReLU(alpha=0.1)
        y = layer(x).numpy()

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "leaky_relu_fp16x16"
        make_test([x], y, "NNTrait::leaky_relu(@input_0, @FixedTrait::new(6554, false))", name, Trait.NN)
https://github.com/gizatechxyz/orion
nodegen/node/less.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Less(RunAll):
    @staticmethod
    def less_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.less(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_u32"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 3, 1)).astype(np.uint32)
            z = np.less(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_u32_broadcast"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = np.less(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_i32"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
            z = np.less(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_i32_broadcast"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = np.less(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_i8"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
            z = np.less(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_i8_broadcast"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.less(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_fp8x23"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = np.less(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_fp8x23_broadcast"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.less(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_fp16x16"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = np.less(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_fp16x16_broadcast"
            make_test([x, y], z, "input_0.less(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/less_equal.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Less_equal(RunAll):
    @staticmethod
    def less_equal_u32():
        def default():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_u32"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_u32_broadcast"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_equal_i32():
        def default():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_i32"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_i32_broadcast"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_equal_i8():
        def default():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_i8"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_i8_broadcast"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_equal_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_fp8x23"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_fp8x23_broadcast"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def less_equal_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_fp16x16"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.less_equal(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "less_equal_fp16x16_broadcast"
            make_test([x, y], z, "input_0.less_equal(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/linear.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
from typing import Optional


def linear(
    i: np.ndarray,
    w: np.ndarray,
    b: Optional[np.ndarray] = None,
) -> np.ndarray:
    return np.dot(i, w.T) + b


class Linear(RunAll):
    @staticmethod
    def linear_i32():
        i = np.random.randint(-5, 9, (3)).astype(np.int32)
        w = np.random.randint(-5, 9, (2, 3)).astype(np.int32)
        b = np.random.randint(-5, 9, (2)).astype(np.int32)
        y = linear(i, w, b)

        i = Tensor(Dtype.I32, i.shape, i.flatten())
        w = Tensor(Dtype.I32, w.shape, w.flatten())
        b = Tensor(Dtype.I32, b.shape, b.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "linear_i32"
        make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)", name, Trait.NN)

    @staticmethod
    def linear_i8():
        i = np.random.randint(-3, 3, (3)).astype(np.int8)
        w = np.random.randint(-3, 3, (2, 3)).astype(np.int8)
        b = np.random.randint(-3, 3, (2)).astype(np.int8)
        y = linear(i, w, b)

        i = Tensor(Dtype.I8, i.shape, i.flatten())
        w = Tensor(Dtype.I8, w.shape, w.flatten())
        b = Tensor(Dtype.I8, b.shape, b.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())

        name = "linear_i8"
        make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)", name, Trait.NN)

    @staticmethod
    def linear_u32():
        i = np.random.randint(0, 6, (3)).astype(np.uint32)
        w = np.random.randint(0, 6, (2, 3)).astype(np.uint32)
        b = np.random.randint(0, 6, (2)).astype(np.uint32)
        y = linear(i, w, b)

        i = Tensor(Dtype.U32, i.shape, i.flatten())
        w = Tensor(Dtype.U32, w.shape, w.flatten())
        b = Tensor(Dtype.U32, b.shape, b.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "linear_u32"
        make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)", name, Trait.NN)

    @staticmethod
    def linear_fp8x23():
        i = np.random.uniform(-5, 7, (3)).astype(np.float64)
        w = np.random.uniform(-5, 7, (2, 3)).astype(np.float64)
        b = np.random.uniform(-5, 7, (2)).astype(np.float64)
        y = linear(i, w, b)

        i = Tensor(Dtype.FP8x23, i.shape, to_fp(i.flatten(), FixedImpl.FP8x23))
        w = Tensor(Dtype.FP8x23, w.shape, to_fp(w.flatten(), FixedImpl.FP8x23))
        b = Tensor(Dtype.FP8x23, b.shape, to_fp(b.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "linear_fp8x23"
        make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)", name, Trait.NN)

    @staticmethod
    def linear_fp16x16():
        i = np.random.uniform(-5, 7, (3)).astype(np.float64)
        w = np.random.uniform(-5, 7, (2, 3)).astype(np.float64)
        b = np.random.uniform(-5, 7, (2)).astype(np.float64)
        y = linear(i, w, b)

        i = Tensor(Dtype.FP16x16, i.shape, to_fp(i.flatten(), FixedImpl.FP16x16))
        w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
        b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "linear_fp16x16"
        make_test([i, w, b], y, "NNTrait::linear(input_0, input_1, input_2)", name, Trait.NN)
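An illustrative run (mine, not part of the generator) of the reference linear above, i.e. y = i . W^T + b:

i = np.array([1, 2, 3])
w = np.array([[1, 0, 0], [0, 1, 0]])  # 2x3 weight matrix
b = np.array([10, 20])
assert (linear(i, w, b) == np.array([11, 22])).all()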
https://github.com/gizatechxyz/orion
nodegen/node/log.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Log(RunAll):
    @staticmethod
    def log_fp8x23():
        x = np.random.uniform(1, 127, (2, 2)).astype(np.float64)
        y = np.log(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "log_fp8x23"
        make_test([x], y, "input_0.log()", name)

    @staticmethod
    def log_fp16x16():
        x = np.random.uniform(1, 127, (2, 2)).astype(np.float64)
        y = np.log(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "log_fp16x16"
        make_test([x], y, "input_0.log()", name)
https://github.com/gizatechxyz/orion
nodegen/node/logsoftmax.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def logsoftmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    x_max = np.max(x, axis=axis, keepdims=True)
    tmp = np.exp(x - x_max)
    s = np.sum(tmp, axis=axis, keepdims=True)
    return (x - x_max) - np.log(s)


class Logsoftmax(RunAll):
    @staticmethod
    def logsoftmax_fp8x23():
        def axis_0():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = logsoftmax(x, 0)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "logsoftmax_fp8x23_axis_0"
            make_test([x], y, "NNTrait::logsoftmax(@input_0, 0)", name, Trait.NN)

        def axis_1():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = logsoftmax(x, 1)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "logsoftmax_fp8x23_axis_1"
            make_test([x], y, "NNTrait::logsoftmax(@input_0, 1)", name, Trait.NN)

        axis_0()
        axis_1()

    @staticmethod
    def logsoftmax_fp16x16():
        def axis_0():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = logsoftmax(x, 0)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "logsoftmax_fp16x16_axis_0"
            make_test([x], y, "NNTrait::logsoftmax(@input_0, 0)", name, Trait.NN)

        def axis_1():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = logsoftmax(x, 1)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "logsoftmax_fp16x16_axis_1"
            make_test([x], y, "NNTrait::logsoftmax(@input_0, 1)", name, Trait.NN)

        axis_0()
        axis_1()
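An illustrative note (mine, not part of the generator): subtracting x_max before exponentiating in the reference logsoftmax above is the standard log-sum-exp stabilization; it leaves the result unchanged but avoids overflow for large inputs.

x = np.array([1000.0, 1001.0])
naive = x - np.log(np.sum(np.exp(x)))  # np.exp(1000) overflows to inf, so this yields -inf
stable = logsoftmax(x)                 # finite: approximately [-1.3133, -0.3133]
assert np.isinf(naive).all() and np.isfinite(stable).all()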
https://github.com/gizatechxyz/orion
nodegen/node/matmul.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Matmul(RunAll):
    @staticmethod
    def matmul_u32():
        def matmul_1D():
            a = np.random.randint(0, 255, (3)).astype(np.uint32)
            b = np.random.randint(0, 255, (3)).astype(np.uint32)
            y = np.matmul(a, b).reshape((1))
            a = Tensor(Dtype.U32, a.shape, a.flatten())
            b = Tensor(Dtype.U32, b.shape, b.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "matmul_u32_1d"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x2():
            a = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
            b = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.U32, a.shape, a.flatten())
            b = Tensor(Dtype.U32, b.shape, b.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "matmul_u32_2x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x1():
            a = np.random.randint(0, 255, (2, 1)).astype(np.uint32)
            b = np.random.randint(0, 255, (1, 2)).astype(np.uint32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.U32, a.shape, a.flatten())
            b = Tensor(Dtype.U32, b.shape, b.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "matmul_u32_2x1"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_1x2():
            a = np.random.randint(0, 255, (1, 2)).astype(np.uint32)
            b = np.random.randint(0, 255, (2, 1)).astype(np.uint32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.U32, a.shape, a.flatten())
            b = Tensor(Dtype.U32, b.shape, b.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "matmul_u32_1x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        matmul_1D()
        matmul_2x2()
        matmul_2x1()
        matmul_1x2()

    @staticmethod
    def matmul_i32():
        def matmul_1D():
            a = np.random.randint(-127, 127, (3)).astype(np.int32)
            b = np.random.randint(-127, 127, (3)).astype(np.int32)
            y = np.matmul(a, b).reshape((1))
            a = Tensor(Dtype.I32, a.shape, a.flatten())
            b = Tensor(Dtype.I32, b.shape, b.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "matmul_i32_1d"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x2():
            a = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
            b = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I32, a.shape, a.flatten())
            b = Tensor(Dtype.I32, b.shape, b.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "matmul_i32_2x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x1():
            a = np.random.randint(-127, 127, (2, 1)).astype(np.int32)
            b = np.random.randint(-127, 127, (1, 2)).astype(np.int32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I32, a.shape, a.flatten())
            b = Tensor(Dtype.I32, b.shape, b.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "matmul_i32_2x1"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_1x2():
            a = np.random.randint(-127, 127, (1, 2)).astype(np.int32)
            b = np.random.randint(-127, 127, (2, 1)).astype(np.int32)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I32, a.shape, a.flatten())
            b = Tensor(Dtype.I32, b.shape, b.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "matmul_i32_1x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        matmul_1D()
        matmul_2x2()
        matmul_2x1()
        matmul_1x2()

    @staticmethod
    def matmul_i8():
        def matmul_1D():
            a = np.random.randint(-4, 5, (3)).astype(np.int8)
            b = np.random.randint(-4, 5, (3)).astype(np.int8)
            y = np.matmul(a, b).reshape((1))
            a = Tensor(Dtype.I8, a.shape, a.flatten())
            b = Tensor(Dtype.I8, b.shape, b.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "matmul_i8_1d"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x2():
            a = np.random.randint(-4, 5, (2, 2)).astype(np.int8)
            b = np.random.randint(-4, 5, (2, 2)).astype(np.int8)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I8, a.shape, a.flatten())
            b = Tensor(Dtype.I8, b.shape, b.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "matmul_i8_2x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x1():
            a = np.random.randint(-4, 5, (2, 1)).astype(np.int8)
            b = np.random.randint(-4, 5, (1, 2)).astype(np.int8)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I8, a.shape, a.flatten())
            b = Tensor(Dtype.I8, b.shape, b.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "matmul_i8_2x1"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_1x2():
            a = np.random.randint(-4, 5, (1, 2)).astype(np.int8)
            b = np.random.randint(-4, 5, (2, 1)).astype(np.int8)
            y = np.matmul(a, b)
            a = Tensor(Dtype.I8, a.shape, a.flatten())
            b = Tensor(Dtype.I8, b.shape, b.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "matmul_i8_1x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        matmul_1D()
        matmul_2x2()
        matmul_2x1()
        matmul_1x2()

    @staticmethod
    def matmul_fp8x23():
        def matmul_1D():
            a = np.random.randint(-3, 4, (3)).astype(np.int64)
            b = np.random.randint(-3, 4, (3)).astype(np.int64)
            y = np.matmul(a, b).reshape((1))
            a = Tensor(Dtype.FP8x23, a.shape, to_fp(a.flatten(), FixedImpl.FP8x23))
            b = Tensor(Dtype.FP8x23, b.shape, to_fp(b.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "matmul_fp8x23_1d"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x2():
            a = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
            b = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP8x23, a.shape, to_fp(a.flatten(), FixedImpl.FP8x23))
            b = Tensor(Dtype.FP8x23, b.shape, to_fp(b.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "matmul_fp8x23_2x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x1():
            a = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
            b = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP8x23, a.shape, to_fp(a.flatten(), FixedImpl.FP8x23))
            b = Tensor(Dtype.FP8x23, b.shape, to_fp(b.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "matmul_fp8x23_2x1"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_1x2():
            a = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
            b = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP8x23, a.shape, to_fp(a.flatten(), FixedImpl.FP8x23))
            b = Tensor(Dtype.FP8x23, b.shape, to_fp(b.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "matmul_fp8x23_1x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        matmul_1D()
        matmul_2x2()
        matmul_2x1()
        matmul_1x2()

    @staticmethod
    def matmul_fp16x16():
        def matmul_1D():
            a = np.random.randint(-3, 4, (3)).astype(np.int64)
            b = np.random.randint(-3, 4, (3)).astype(np.int64)
            y = np.matmul(a, b).reshape((1))
            a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "matmul_fp16x16_1d"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x2():
            a = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
            b = np.random.randint(-3, 4, (2, 2)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "matmul_fp16x16_2x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_2x1():
            a = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
            b = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "matmul_fp16x16_2x1"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        def matmul_1x2():
            a = np.random.randint(-3, 4, (1, 2)).astype(np.int64)
            b = np.random.randint(-3, 4, (2, 1)).astype(np.int64)
            y = np.matmul(a, b)
            a = Tensor(Dtype.FP16x16, a.shape, to_fp(a.flatten(), FixedImpl.FP16x16))
            b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "matmul_fp16x16_1x2"
            make_test([a, b], y, "input_0.matmul(@input_1)", name)

        matmul_1D()
        matmul_2x2()
        matmul_2x1()
        matmul_1x2()
https://github.com/gizatechxyz/orion
nodegen/node/max.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Max(RunAll):
    @staticmethod
    def max_u32_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.maximum(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "max_u32_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.maximum(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "max_u32_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_i32_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            z = np.maximum(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "max_i32_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
            z = np.maximum(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "max_i32_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_i8_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            z = np.maximum(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "max_i8_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
            z = np.maximum(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "max_i8_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_fp8x23_two_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.maximum(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "max_fp8x23_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.maximum(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "max_fp8x23_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_fp16x16_two_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.maximum(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "max_fp16x16_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.maximum(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "max_fp16x16_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::max(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_u32_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            m = Tensor(Dtype.U32, m.shape, m.flatten())
            name = "max_u32_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.random.randint(0, 6, (1, 1)).astype(np.uint32)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            m = Tensor(Dtype.U32, m.shape, m.flatten())
            name = "max_u32_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_i32_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            m = Tensor(Dtype.I32, m.shape, m.flatten())
            name = "max_i32_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
            z = np.random.randint(0, 6, (1, 1)).astype(np.int32)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            m = Tensor(Dtype.I32, m.shape, m.flatten())
            name = "max_i32_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_i8_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            m = Tensor(Dtype.I8, m.shape, m.flatten())
            name = "max_i8_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
            z = np.random.randint(0, 6, (1, 1)).astype(np.int8)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            m = Tensor(Dtype.I8, m.shape, m.flatten())
            name = "max_i8_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_fp8x23_three_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            m = Tensor(Dtype.FP8x23, m.shape, to_fp(m.flatten(), FixedImpl.FP8x23))
            name = "max_fp8x23_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            m = Tensor(Dtype.FP8x23, m.shape, to_fp(m.flatten(), FixedImpl.FP8x23))
            name = "max_fp8x23_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def max_fp16x16_three_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            m = Tensor(Dtype.FP16x16, m.shape, to_fp(m.flatten(), FixedImpl.FP16x16))
            name = "max_fp16x16_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
            m = np.maximum(np.maximum(x, y), z)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            m = Tensor(Dtype.FP16x16, m.shape, to_fp(m.flatten(), FixedImpl.FP16x16))
            name = "max_fp16x16_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::max(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
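Note on the generator above: the broadcast() cases rely on NumPy's broadcasting rules, where a size-1 axis is stretched to match its counterpart, so shapes (2, 2) and (1, 2) combine elementwise into a (2, 2) result. A minimal, self-contained check of that behaviour (plain NumPy, independent of the nodegen helpers):

import numpy as np

# A (1, 2) row broadcasts across both rows of a (2, 2) matrix.
a = np.array([[1, 5], [4, 2]], dtype=np.uint32)
b = np.array([[3, 3]], dtype=np.uint32)
z = np.maximum(a, b)
assert z.shape == (2, 2)
assert (z == np.array([[3, 5], [4, 3]])).all()

The same rule is what lets the three-tensor cases combine (2, 2), (1, 2) and (1, 1) inputs.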
nodegen/node/min.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Min(RunAll):
    @staticmethod
    def min_u32_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.minimum(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "min_u32_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.minimum(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "min_u32_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_i32_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            z = np.minimum(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "min_i32_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
            z = np.minimum(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "min_i32_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_i8_two_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            z = np.minimum(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "min_i8_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
            z = np.minimum(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "min_i8_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_fp8x23_two_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.minimum(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "min_fp8x23_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.minimum(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "min_fp8x23_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_fp16x16_two_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.minimum(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "min_fp16x16_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.minimum(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "min_fp16x16_broadcast_two_tensors"
            make_test([x, y], z, "TensorTrait::min(array![input_0, input_1].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_u32_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            m = Tensor(Dtype.U32, m.shape, m.flatten())
            name = "min_u32_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.random.randint(0, 6, (1, 1)).astype(np.uint32)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            m = Tensor(Dtype.U32, m.shape, m.flatten())
            name = "min_u32_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_i32_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            m = Tensor(Dtype.I32, m.shape, m.flatten())
            name = "min_i32_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
            z = np.random.randint(0, 6, (1, 1)).astype(np.int32)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            m = Tensor(Dtype.I32, m.shape, m.flatten())
            name = "min_i32_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_i8_three_tensors():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            z = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            m = Tensor(Dtype.I8, m.shape, m.flatten())
            name = "min_i8_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
            y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
            z = np.random.randint(0, 6, (1, 1)).astype(np.int8)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            m = Tensor(Dtype.I8, m.shape, m.flatten())
            name = "min_i8_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_fp8x23_three_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            m = Tensor(Dtype.FP8x23, m.shape, to_fp(m.flatten(), FixedImpl.FP8x23))
            name = "min_fp8x23_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            m = Tensor(Dtype.FP8x23, m.shape, to_fp(m.flatten(), FixedImpl.FP8x23))
            name = "min_fp8x23_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()

    @staticmethod
    def min_fp16x16_three_tensors():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            m = Tensor(Dtype.FP16x16, m.shape, to_fp(m.flatten(), FixedImpl.FP16x16))
            name = "min_fp16x16_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.random.randint(-3, 3, (1, 1)).astype(np.float64)
            m = np.minimum(np.minimum(x, y), z)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            m = Tensor(Dtype.FP16x16, m.shape, to_fp(m.flatten(), FixedImpl.FP16x16))
            name = "min_fp16x16_broadcast_three_tensors"
            make_test([x, y, z], m, "TensorTrait::min(array![input_0, input_1, input_2].span())", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/mul.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Mul(RunAll):
    @staticmethod
    def mul_u32():
        def default():
            x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
            z = x * y
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "mul_u32"
            make_test([x, y], z, "input_0 * input_1", name)

        def broadcast():
            x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
            z = x * y
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "mul_u32_broadcast"
            make_test([x, y], z, "input_0 * input_1", name)

        default()
        broadcast()

    @staticmethod
    def mul_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = x * y
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "mul_i32"
            make_test([x, y], z, "input_0 * input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
            z = x * y
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.I32, z.shape, z.flatten())
            name = "mul_i32_broadcast"
            make_test([x, y], z, "input_0 * input_1", name)

        default()
        broadcast()

    @staticmethod
    def mul_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = x * y
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "mul_i8"
            make_test([x, y], z, "input_0 * input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
            z = x * y
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.I8, z.shape, z.flatten())
            name = "mul_i8_broadcast"
            make_test([x, y], z, "input_0 * input_1", name)

        default()
        broadcast()

    @staticmethod
    def mul_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = x * y
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "mul_fp8x23"
            make_test([x, y], z, "input_0 * input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = x * y
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "mul_fp8x23_broadcast"
            make_test([x, y], z, "input_0 * input_1", name)

        default()
        broadcast()

    @staticmethod
    def mul_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = x * y
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "mul_fp16x16"
            make_test([x, y], z, "input_0 * input_1", name)

        def broadcast():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
            z = x * y
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "mul_fp16x16_broadcast"
            make_test([x, y], z, "input_0 * input_1", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
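The fixed-point generators above pass float reference values through the helpers' to_fp before serializing them as tensors. That helper's body is not shown in these files; a minimal sketch of the assumed behaviour, scaling by the number of fractional bits (23 for FP8x23, 16 for FP16x16) and rounding to an integer representation:

import numpy as np

# Hypothetical stand-in for ..helpers.to_fp: scale by 2**frac_bits and
# round, producing the raw integer magnitudes the Cairo side expects.
def to_fp_sketch(values: np.ndarray, frac_bits: int) -> np.ndarray:
    return np.round(values * 2**frac_bits).astype(np.int64)

# 1.5 in FP16x16 is 1.5 * 2**16 = 98304.
assert to_fp_sketch(np.array([1.5]), 16)[0] == 98304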
nodegen/node/neg.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Neg(RunAll):
    @staticmethod
    def neg_i32():
        x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
        y = np.negative(x)
        x = Tensor(Dtype.I32, x.shape, x.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "neg_i32"
        make_test([x], y, "input_0.neg()", name)

    @staticmethod
    def neg_i8():
        x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
        y = np.negative(x)
        x = Tensor(Dtype.I8, x.shape, x.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())
        name = "neg_i8"
        make_test([x], y, "input_0.neg()", name)

    @staticmethod
    def neg_fp8x23():
        x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int64), FixedImpl.FP8x23)
        y = np.negative(x)
        x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
        y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
        name = "neg_fp8x23"
        make_test([x], y, "input_0.neg()", name)

    @staticmethod
    def neg_fp16x16():
        x = to_fp(np.random.randint(-127, 127, (2, 2)).astype(np.int64), FixedImpl.FP16x16)
        y = np.negative(x)
        x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
        y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
        name = "neg_fp16x16"
        make_test([x], y, "input_0.neg()", name)
https://github.com/gizatechxyz/orion
nodegen/node/nonzero.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Nonzero(RunAll):
    @staticmethod
    def nonzero_u32():
        def nonzero_2D():
            x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_u32_2d"
            make_test([x], y, "input_0.nonzero()", name)

        def nonzero_3D():
            x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_u32_3d"
            make_test([x], y, "input_0.nonzero()", name)

        nonzero_2D()
        nonzero_3D()

    @staticmethod
    def nonzero_i32():
        def nonzero_2D():
            x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_i32_2d"
            make_test([x], y, "input_0.nonzero()", name)

        def nonzero_3D():
            x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_i32_3d"
            make_test([x], y, "input_0.nonzero()", name)

        nonzero_2D()
        nonzero_3D()

    @staticmethod
    def nonzero_i8():
        def nonzero_2D():
            x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_i8_2d"
            make_test([x], y, "input_0.nonzero()", name)

        def nonzero_3D():
            x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_i8_3d"
            make_test([x], y, "input_0.nonzero()", name)

        nonzero_2D()
        nonzero_3D()

    @staticmethod
    def nonzero_fp8x23():
        def nonzero_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 4)).astype(np.int64), FixedImpl.FP8x23)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_fp8x23_2d"
            make_test([x], y, "input_0.nonzero()", name)

        def nonzero_3D():
            x = to_fp(np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64), FixedImpl.FP8x23)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_fp8x23_3d"
            make_test([x], y, "input_0.nonzero()", name)

        nonzero_2D()
        nonzero_3D()

    @staticmethod
    def nonzero_fp16x16():
        def nonzero_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 4)).astype(np.int64), FixedImpl.FP16x16)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_fp16x16_2d"
            make_test([x], y, "input_0.nonzero()", name)

        def nonzero_3D():
            x = to_fp(np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64), FixedImpl.FP16x16)
            y = np.array(np.nonzero(x), dtype=np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "nonzero_fp16x16_3d"
            make_test([x], y, "input_0.nonzero()", name)

        nonzero_2D()
        nonzero_3D()
https://github.com/gizatechxyz/orion
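np.nonzero returns one index array per input dimension; wrapping the result in np.array stacks them into a (ndim, n_nonzero) matrix, which is the layout the tests above serialize. A small worked example:

import numpy as np

x = np.array([[0, 7], [3, 0]], dtype=np.uint32)
y = np.array(np.nonzero(x), dtype=np.int64)
# Row 0 holds the row indices, row 1 the column indices of each non-zero.
assert y.shape == (2, 2)
assert (y == np.array([[0, 1], [1, 0]])).all()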
nodegen/node/not.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, Tensor, Dtype


class Not(RunAll):
    @staticmethod
    def not_bool():
        x = np.random.uniform(True, False, (1, 1)).astype(bool)
        y = ~(x)
        x = Tensor(Dtype.Bool, x.shape, x.flatten())
        y = Tensor(Dtype.Bool, y.shape, y.flatten())
        name = "not_bool"
        make_node([x], [y], name)
        # The expected output is y = ~x, so the generated Cairo code must apply
        # the Not operator (assumed spelling: `not`); the original signature
        # passed "input_0" through unchanged, which cannot reproduce y.
        make_test([x], y, "input_0.not()", name)
https://github.com/gizatechxyz/orion
nodegen/node/or.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Or(RunAll):
    @staticmethod
    def or_u32():
        def default():
            x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_u32"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        def broadcast():
            x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
            y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_u32_broadcast"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def or_i32():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_i32"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_i32_broadcast"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def or_i8():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_i8"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_i8_broadcast"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def or_fp8x23():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_fp8x23"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_fp8x23_broadcast"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def or_fp16x16():
        def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_fp16x16"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        def broadcast():
            x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
            y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
            z = np.logical_or(x, y)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.U32, z.shape, z.flatten())
            name = "or_fp16x16_broadcast"
            make_test([x, y], z, "input_0.or(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
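Note that np.logical_or yields a boolean array while the expected output tensor above is declared as Dtype.U32, so the flattened True/False values presumably serialize as 1/0. Any non-zero input counts as true, including negative signed values:

import numpy as np

x = np.array([[-3, 0], [0, 0]], dtype=np.int32)
y = np.array([[0, 0], [2, 0]], dtype=np.int32)
z = np.logical_or(x, y)
assert z.dtype == bool
assert (z.astype(np.uint32) == np.array([[1, 0], [1, 0]])).all()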
nodegen/node/pow.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Pow(RunAll):
    @staticmethod
    def pow_fp8x23():
        def default():
            x = np.array([1, 2, 3]).astype(np.float64)
            y = np.array([1, 2, 3]).astype(np.float64)
            z = np.array(pow(x, y), dtype=np.float64)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "pow_fp8x23"
            make_test([x, y], z, "input_0.pow(@input_1)", name)

        def broadcast():
            x = np.array([1, 2, 3]).astype(np.float64)
            y = np.array([2]).astype(np.float64)
            z = np.array(pow(x, y), dtype=np.float64)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            z = Tensor(Dtype.FP8x23, z.shape, to_fp(z.flatten(), FixedImpl.FP8x23))
            name = "pow_fp8x23_broadcast"
            make_test([x, y], z, "input_0.pow(@input_1)", name)

        default()
        broadcast()

    @staticmethod
    def pow_fp16x16():
        def default():
            x = np.array([1, 2, 3]).astype(np.float64)
            y = np.array([1, 2, 3]).astype(np.float64)
            z = np.array(pow(x, y), dtype=np.float64)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "pow_fp16x16"
            make_test([x, y], z, "input_0.pow(@input_1)", name)

        def broadcast():
            x = np.array([1, 2, 3]).astype(np.float64)
            y = np.array([2]).astype(np.float64)
            z = np.array(pow(x, y), dtype=np.float64)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            z = Tensor(Dtype.FP16x16, z.shape, to_fp(z.flatten(), FixedImpl.FP16x16))
            name = "pow_fp16x16_broadcast"
            make_test([x, y], z, "input_0.pow(@input_1)", name)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/random_uniform_like.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def random_uniform_like(x: np.ndarray, high: int = 1, low: int = 0, seed: int = 25) -> np.ndarray:
    dtype = np.float64
    if seed is None or np.isnan(seed):  # type: ignore
        state = np.random.RandomState()
    else:
        state = np.random.RandomState(seed=int(seed))  # type: ignore
    res = state.rand(*x.shape).astype(dtype)
    res *= high - low  # type: ignore
    res += low  # type: ignore
    return (res.astype(dtype),)


def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
    match dtype:
        case Dtype.FP8x23:
            return ["Option::Some(FP8x23 { " + f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} " + "})" for x in data.flatten()]
        case Dtype.FP16x16:
            return ["Option::Some(FP16x16 { " + f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} " + "})" for x in data.flatten()]
        case Dtype.U32:
            return [f"Option::Some({int(x)})" for x in data.flatten()]


class Random_uniform_like(RunAll):
    @staticmethod
    def fp8x23():
        x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float64)
        y = random_uniform_like(x)
        args = [10, 1]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y[0].shape, to_fp(y[0].flatten(), FixedImpl.FP8x23))
        name = "random_uniform_like_fp8x23"
        make_test(
            [x],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))",  # The code signature.
            name  # The name of the generated folder.
        )

    @staticmethod
    def fp16x16():
        x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float16)
        y = random_uniform_like(x)
        args = [10, 1]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y[0].shape, to_fp(y[0].flatten(), FixedImpl.FP16x16))
        name = "random_uniform_like_fp16x16"
        make_test(
            [x],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))",  # The code signature.
            name  # The name of the generated folder.
        )

    # @staticmethod
    # def fp64x64():
    #     x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64)
    #     y = random_uniform_like(x)
    #     x = Tensor(Dtype.FP64x64, x.shape, to_fp(x.flatten(), FixedImpl.FP64x64))
    #     y = Tensor(Dtype.FP64x64, y[0].shape, to_fp(y[0].flatten(), FixedImpl.FP64x64))
    #     name = "random_uniform_like_fp64x64"
    #     make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", name)

    # @staticmethod
    # def fpi8():
    #     x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int8)
    #     y = random_uniform_like(x)
    #     x = Tensor(Dtype.I8, x.shape, x.flatten())
    #     y = Tensor(Dtype.I8, y[0].shape, y[0].flatten())
    #     name = "random_uniform_like_i8"
    #     make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", name)

    # @staticmethod
    # def fpi32():
    #     x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int32)
    #     y = random_uniform_like(x)
    #     x = Tensor(Dtype.I32, x.shape, x.flatten())
    #     y = Tensor(Dtype.I32, y[0].shape, y[0].flatten())
    #     name = "random_uniform_like_i32"
    #     make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", name)

    # @staticmethod
    # def fpu32():
    #     x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.uint32)
    #     y = random_uniform_like(x)
    #     args = [5, 1, 10]
    #     args_str = get_data_statement(np.array(args).flatten(), Dtype.U32)
    #     x = Tensor(Dtype.U32, x.shape, x.flatten())
    #     y = Tensor(Dtype.U32, y[0].shape, y[0].flatten())
    #     name = "random_uniform_like_u32"
    #     make_test(
    #         [x],  # List of input tensors.
    #         y,  # The expected output result.
    #         f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)})",  # The code signature.
    #         name  # The name of the generated folder.
    #     )
https://github.com/gizatechxyz/orion
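Two details of the generator above are worth spelling out. First, seeding: with the default seed=25, random_uniform_like produces the same reference output on every run, which is what keeps the generated Cairo assertions stable. Second, argument serialization: get_data_statement renders each fixed-point argument as a Cairo Option literal; assuming to_fp scales FP16x16 values by 2**16, the high/low pair [10, 1] would come out as shown in the comments below.

# Deterministic sampling: the same seed always reproduces the reference output.
import numpy as np
a = np.random.RandomState(seed=25).rand(3)
b = np.random.RandomState(seed=25).rand(3)
assert (a == b).all()

# Hypothetical serialization of args = [10, 1] in FP16x16 (10 * 2**16 = 655360):
#   'Option::Some(FP16x16 { mag: 655360, sign: false })'
#   'Option::Some(FP16x16 { mag: 65536, sign: false })'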
nodegen/node/range.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement


class Range(RunAll):
    @staticmethod
    # We test here with the fp8x23 implementation.
    def fp8x23():
        args = [1, 5, 0.3]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
        y = np.arange(*args)
        print(y)

        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        # Define the name of the generated folder.
        name = "range_fp8x23"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::range({','.join(args_str)})",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the fp16x16 implementation.
    def fp16x16():
        args = [1, 25, 3]
        args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
        y = np.arange(*args)
        print(y)

        # Convert the float values in `y` to fixed point with the `to_fp` method:
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        # Define the name of the generated folder.
        name = "range_fp16x16"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::range({','.join(args_str)})",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the i8 implementation.
    def i8():
        args = [-1, 25, 3]
        args_str = get_data_statement(np.array(args).flatten(), Dtype.I8)
        y = np.arange(*args)
        print(y)

        # Integer dtypes need no fixed-point conversion:
        y = Tensor(Dtype.I8, y.shape, y.flatten())

        # Define the name of the generated folder.
        name = "range_i8"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::range({','.join(args_str)})",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the i32 implementation.
    def i32():
        args = [21, 2, -3]
        args_str = get_data_statement(np.array(args).flatten(), Dtype.I32)
        y = np.arange(*args)
        print(y)

        # Integer dtypes need no fixed-point conversion:
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        # Define the name of the generated folder.
        name = "range_i32"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::range({','.join(args_str)})",  # The code signature.
            name,  # The name of the generated folder.
        )

    @staticmethod
    # We test here with the u32 implementation.
    def u32():
        args = [1, 25, 3]
        args_str = get_data_statement(np.array(args).flatten(), Dtype.U32)
        y = np.arange(*args)
        print(y)

        # Integer dtypes need no fixed-point conversion:
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        # Define the name of the generated folder.
        name = "range_u32"
        # Invoke the `make_test` method to generate the corresponding Cairo tests:
        make_test(
            [],  # List of input tensors.
            y,  # The expected output result.
            f"TensorTrait::range({','.join(args_str)})",  # The code signature.
            name,  # The name of the generated folder.
        )
https://github.com/gizatechxyz/orion
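The reference values above come straight from np.arange(start, stop, step), which excludes stop and supports float steps and negative steps alike. For the argument triples used in these tests:

import numpy as np

assert len(np.arange(1, 5, 0.3)) == 14                    # 1.0, 1.3, ..., 4.9
assert list(np.arange(-1, 25, 3))[:3] == [-1, 2, 5]       # ascending from -1
assert list(np.arange(21, 2, -3)) == [21, 18, 15, 12, 9, 6, 3]  # descending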
nodegen/node/reduce_l1.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_l1(RunAll):
    @staticmethod
    def reduce_l1_fp8x23():
        def reduce_l1_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l1_fp8x23_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, false)", name)

        def reduce_l1_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l1_fp8x23_export_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, true)", name)

        def reduce_l1_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l1_fp8x23_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_l1(0, true)", name)

        reduce_l1_export_do_not_keepdims()
        reduce_l1_export_keepdims()
        reduce_l1_axis_0()

    @staticmethod
    def reduce_l1_fp16x16():
        def reduce_l1_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l1_fp16x16_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, false)", name)

        def reduce_l1_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l1_fp16x16_export_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, true)", name)

        def reduce_l1_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l1_fp16x16_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_l1(0, true)", name)

        reduce_l1_export_do_not_keepdims()
        reduce_l1_export_keepdims()
        reduce_l1_axis_0()

    @staticmethod
    def reduce_l1_i8():
        def reduce_l1_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int8)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_l1_i8_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, false)", name)

        def reduce_l1_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int8)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_l1_i8_export_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, true)", name)

        def reduce_l1_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int8)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_l1_i8_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_l1(0, true)", name)

        reduce_l1_export_do_not_keepdims()
        reduce_l1_export_keepdims()
        reduce_l1_axis_0()

    @staticmethod
    def reduce_l1_i32():
        def reduce_l1_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int32)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_l1_i32_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, false)", name)

        def reduce_l1_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_l1_i32_export_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, true)", name)

        def reduce_l1_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_l1_i32_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_l1(0, true)", name)

        reduce_l1_export_do_not_keepdims()
        reduce_l1_export_keepdims()
        reduce_l1_axis_0()

    @staticmethod
    def reduce_l1_u32():
        def reduce_l1_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.uint32)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=False).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_l1_u32_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, false)", name)

        def reduce_l1_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.uint32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_l1_u32_export_keepdims"
            make_test([x], y, "input_0.reduce_l1(2, true)", name)

        def reduce_l1_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.uint32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.abs(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_l1_u32_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_l1(0, true)", name)

        reduce_l1_export_do_not_keepdims()
        reduce_l1_export_keepdims()
        reduce_l1_axis_0()
https://github.com/gizatechxyz/orion
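A note on the reduce_l1 reference computation above: it is the sum of absolute values along an axis, and the keepdims flag only changes the output rank, not the values. With the [3, 2, 2] arange input used in those tests:

import numpy as np

x = np.reshape(np.arange(1, 13, dtype=np.int64), [3, 2, 2])
flat = np.sum(np.abs(x), axis=(2,), keepdims=False)
kept = np.sum(np.abs(x), axis=(2,), keepdims=True)
# Same numbers either way; only the shape differs.
assert flat.shape == (3, 2) and kept.shape == (3, 2, 1)
assert (flat == np.array([[3, 7], [11, 15], [19, 23]])).all()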
nodegen/node/reduce_l2.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_l2(RunAll):
    @staticmethod
    def reduce_l2_fp8x23():
        def reduce_l2_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=False)).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l2_fp8x23_export_do_not_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(2, false)", name)

        def reduce_l2_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l2_fp8x23_export_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(2, true)", name)

        def reduce_l2_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_l2_fp8x23_export_negative_axes_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(0, true)", name)

        reduce_l2_export_do_not_keepdims()
        reduce_l2_export_keepdims()
        reduce_l2_axis_0()

    @staticmethod
    def reduce_l2_fp16x16():
        def reduce_l2_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=False)).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l2_fp16x16_export_do_not_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(2, false)", name)

        def reduce_l2_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l2_fp16x16_export_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(2, true)", name)

        def reduce_l2_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sqrt(np.sum(a=np.square(x), axis=tuple(axes), keepdims=True)).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_l2_fp16x16_export_negative_axes_keepdims"
            make_node([x], [y], name)
            make_test([x], y, "input_0.reduce_l2(0, true)", name)

        reduce_l2_export_do_not_keepdims()
        reduce_l2_export_keepdims()
        reduce_l2_axis_0()

    @staticmethod
    def reduce_l2_complex64():
        def reduce_l2_axis_0():
            shape = [2, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j, 4.-1.j]), shape)
            y = np.sqrt(np.sum(a=np.square(abs(x)), axis=tuple(axes), keepdims=True))
            print(to_fp(x.flatten(), FixedImpl.FP64x64))
            x = Tensor(Dtype.COMPLEX64, x.shape, to_fp(x.flatten(), FixedImpl.FP64x64))
            y = Tensor(Dtype.COMPLEX64, y.shape, to_fp(y.flatten(), FixedImpl.FP64x64))
            name = "reduce_l2_complex64_axis_0"
            make_test([x], y, "input_0.reduce_l2(0, true)", name)

        reduce_l2_axis_0()
https://github.com/gizatechxyz/orion
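For the complex64 case above, the L2 reduction is applied to magnitudes: abs(x) gives |a+bj| = sqrt(a^2 + b^2), which is then squared and summed, so each reduced entry is sqrt(sum(|x|^2)) down a column. Verifying the first column of that test's input by hand:

import numpy as np

col = np.array([1.+2.j, 3.-2.j])
norm = np.sqrt(np.sum(np.square(abs(col))))
# |1+2j|^2 = 5 and |3-2j|^2 = 13, so the column norm is sqrt(18).
assert np.isclose(norm, np.sqrt(18))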
nodegen/node/reduce_log_sum.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_log_sum(RunAll):
    @staticmethod
    def reduce_log_sum_fp8x23():
        def reduce_log_sum_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "reduce_log_sum_fp8x23_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(2, false)", name)

        def reduce_log_sum_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "reduce_log_sum_fp8x23_export_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(2, true)", name)

        def reduce_log_sum_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "reduce_log_sum_fp8x23_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(0, true)", name)

        reduce_log_sum_export_do_not_keepdims()
        reduce_log_sum_export_keepdims()
        reduce_log_sum_axis_0()

    @staticmethod
    def reduce_log_sum_fp16x16():
        def reduce_log_sum_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))
            # The fp16x16 cases originally built FP8x23 tensors by copy-paste;
            # the dtype and fixed-point implementation must match the test name.
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "reduce_log_sum_fp16x16_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(2, false)", name)

        def reduce_log_sum_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "reduce_log_sum_fp16x16_export_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(2, true)", name)

        def reduce_log_sum_axis_0():
            shape = [2, 2, 2]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "reduce_log_sum_fp16x16_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_log_sum(0, true)", name)

        reduce_log_sum_export_do_not_keepdims()
        reduce_log_sum_export_keepdims()
        reduce_log_sum_axis_0()
https://github.com/gizatechxyz/orion
nodegen/node/reduce_log_sum_exp.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, Tensor, Dtype, FixedImpl, to_fp


class Reduce_log_sum_exp(RunAll):
    @staticmethod
    def reduce_log_sum_exp_fp32x32():
        def reduce_log_sum_exp_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
            y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=False)).astype(np.float64)
            x = Tensor(Dtype.FP32x32, x.shape, to_fp(x.flatten(), FixedImpl.FP32x32))
            y = Tensor(Dtype.FP32x32, y.shape, to_fp(y.flatten(), FixedImpl.FP32x32))
            name = "reduce_log_sum_exp_fp32x32_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_log_sum_exp(2, false)", name)

        def reduce_log_sum_exp_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
            y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64)
            x = Tensor(Dtype.FP32x32, x.shape, to_fp(x.flatten(), FixedImpl.FP32x32))
            y = Tensor(Dtype.FP32x32, y.shape, to_fp(y.flatten(), FixedImpl.FP32x32))
            name = "reduce_log_sum_exp_fp32x32_export_keepdims"
            make_test([x], y, "input_0.reduce_log_sum_exp(2, true)", name)

        def reduce_log_sum_exp_axis_0():
            shape = [3, 2, 2]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
            y = np.log(np.sum(np.exp(x), axis=tuple(axes), keepdims=True)).astype(np.float64)
            x = Tensor(Dtype.FP32x32, x.shape, to_fp(x.flatten(), FixedImpl.FP32x32))
            y = Tensor(Dtype.FP32x32, y.shape, to_fp(y.flatten(), FixedImpl.FP32x32))
            name = "reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_log_sum_exp(0, true)", name)

        reduce_log_sum_exp_export_do_not_keepdims()
        reduce_log_sum_exp_export_keepdims()
        reduce_log_sum_exp_axis_0()
https://github.com/gizatechxyz/orion
nodegen/node/reduce_mean.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_mean(RunAll):
    @staticmethod
    def reduce_mean_u32():
        def reduce_mean_1D():
            x = np.array([0, 1, 2,]).astype(np.uint32)
            y = np.mean(x, keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_mean_u32_1D"
            make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_mean_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.mean(x, keepdims=True).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_mean_u32_2D_default"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.mean(x, keepdims=False).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_mean_u32_2D_keepdims"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.mean(x, axis=(1), keepdims=True).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_mean_u32_2D_axis_1"
                make_test([x], y, "input_0.reduce_mean(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_mean_1D()
        reduce_mean_2D()

    @staticmethod
    def reduce_mean_i32():
        def reduce_mean_1D():
            x = np.array([0, 1, 2,]).astype(np.int32)
            y = np.mean(x, keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_mean_i32_1D"
            make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_mean_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.mean(x, keepdims=True).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_mean_i32_2D_default"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.mean(x, keepdims=False).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_mean_i32_2D_keepdims"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.mean(x, axis=(1), keepdims=True).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_mean_i32_2D_axis_1"
                make_test([x], y, "input_0.reduce_mean(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_mean_1D()
        reduce_mean_2D()

    @staticmethod
    def reduce_mean_i8():
        def reduce_mean_1D():
            x = np.array([0, 1, 2,]).astype(np.int8)
            y = np.mean(x, keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_mean_i8_1D"
            make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_mean_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.mean(x, keepdims=True).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_mean_i8_2D_default"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.mean(x, keepdims=False).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_mean_i8_2D_keepdims"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.mean(x, axis=(1), keepdims=True).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_mean_i8_2D_axis_1"
                make_test([x], y, "input_0.reduce_mean(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_mean_1D()
        reduce_mean_2D()

    @staticmethod
    def reduce_mean_fp8x23():
        def reduce_mean_1D():
            x = np.array([0, 1, 2,]).astype(np.int64)
            y = np.mean(x, keepdims=True)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "reduce_mean_fp8x23_1D"
            make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_mean_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, keepdims=True)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_mean_fp8x23_2D_default"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, keepdims=False)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_mean_fp8x23_2D_keepdims"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, axis=(1), keepdims=True)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_mean_fp8x23_2D_axis_1"
                make_test([x], y, "input_0.reduce_mean(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_mean_1D()
        reduce_mean_2D()

    @staticmethod
    def reduce_mean_fp16x16():
        def reduce_mean_1D():
            x = np.array([0, 1, 2,]).astype(np.int64)
            y = np.mean(x, keepdims=True)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "reduce_mean_fp16x16_1D"
            make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_mean_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, keepdims=True)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_mean_fp16x16_2D_default"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, keepdims=False)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_mean_fp16x16_2D_keepdims"
                make_test([x], y, "input_0.reduce_mean(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.mean(x, axis=(1), keepdims=True)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_mean_fp16x16_2D_axis_1"
                make_test([x], y, "input_0.reduce_mean(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_mean_1D()
        reduce_mean_2D()
https://github.com/gizatechxyz/orion
nodegen/node/reduce_min.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_min(RunAll):
    @staticmethod
    def reduce_min_u32():
        def reduce_min_1D():
            x = np.array([0, 1, 2,]).astype(np.uint32)
            y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_min_u32_1D"
            make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_min_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_min_u32_2D_default"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=False).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_min_u32_2D_keepdims"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=(1), keepdims=True).astype(np.uint32)
                x = Tensor(Dtype.U32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
                name = "reduce_min_u32_2D_axis_1"
                make_test([x], y, "input_0.reduce_min(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_min_1D()
        reduce_min_2D()

    @staticmethod
    def reduce_min_i32():
        def reduce_min_1D():
            x = np.array([0, 1, 2,]).astype(np.int32)
            y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_min_i32_1D"
            make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_min_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_min_i32_2D_default"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=False).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_min_i32_2D_keepdims"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2)
                y = np.minimum.reduce(x, axis=(1), keepdims=True).astype(np.int32)
                x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.I32, y.shape, y.flatten())
                name = "reduce_min_i32_2D_axis_1"
                make_test([x], y, "input_0.reduce_min(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_min_1D()
        reduce_min_2D()

    @staticmethod
    def reduce_min_i8():
        def reduce_min_1D():
            x = np.array([0, 1, 2,]).astype(np.int8)
            y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_min_i8_1D"
            make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_min_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=True).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_min_i8_2D_default"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=False).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_min_i8_2D_keepdims"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2)
                y = np.minimum.reduce(x, axis=(1), keepdims=True).astype(np.int8)
                x = Tensor(Dtype.I8, x.shape, x.flatten())
                y = Tensor(Dtype.I8, y.shape, y.flatten())
                name = "reduce_min_i8_2D_axis_1"
                make_test([x], y, "input_0.reduce_min(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_min_1D()
        reduce_min_2D()

    @staticmethod
    def reduce_min_fp8x23():
        def reduce_min_1D():
            x = np.array([0, 1, 2,]).astype(np.int64)
            y = np.minimum.reduce(x, axis=None, keepdims=True)
            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
            name = "reduce_min_fp8x23_1D"
            make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_min_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=True)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_min_fp8x23_2D_default"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=False)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_min_fp8x23_2D_keepdims"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=(1), keepdims=True)
                x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
                name = "reduce_min_fp8x23_2D_axis_1"
                make_test([x], y, "input_0.reduce_min(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_min_1D()
        reduce_min_2D()

    @staticmethod
    def reduce_min_fp16x16():
        def reduce_min_1D():
            x = np.array([0, 1, 2,]).astype(np.int64)
            y = np.minimum.reduce(x, axis=None, keepdims=True)
            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
            name = "reduce_min_fp16x16_1D"
            make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

        def reduce_min_2D():
            def default():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=True)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_min_fp16x16_2D_default"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::None(()), Option::None(()))", name)

            def keepdims():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=None, keepdims=False)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_min_fp16x16_2D_keepdims"
                make_test([x], y, "input_0.reduce_min(Option::None(()), Option::Some(false), Option::None(()))", name)

            def axis_1():
                x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2)
                y = np.minimum.reduce(x, axis=(1), keepdims=True)
                x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
                y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
                name = "reduce_min_fp16x16_2D_axis_1"
                make_test([x], y, "input_0.reduce_min(Option::Some(array![1].span()), Option::None(()), Option::None(()))", name)

            default()
            keepdims()
            axis_1()

        reduce_min_1D()
        reduce_min_2D()
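
# Illustrative sketch (not part of the original generator): np.minimum.reduce over an
# axis is equivalent to np.min over that axis; the ufunc form above mirrors the ONNX
# reference tests.
def _sanity_check_minimum_reduce():
    a = np.array([[3, 1], [2, 0]], dtype=np.int64)
    assert np.array_equal(np.minimum.reduce(a, axis=1), np.min(a, axis=1))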
https://github.com/gizatechxyz/orion
nodegen/node/reduce_sum.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_sum(RunAll):
    @staticmethod
    def reduce_sum_no_keep_dims():
        axes = np.array([1], dtype=np.uint32)
        keepdims = 0
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]).astype(np.uint32)
        y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "reduce_sum_no_keep_dims"
        make_test([x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None)", name)

    @staticmethod
    def reduce_sum_keep_dims():
        axes = np.array([1], dtype=np.uint32)
        keepdims = 1
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]).astype(np.uint32)
        y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "reduce_sum_keep_dims"
        make_test([x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None)", name)

    @staticmethod
    def reduce_sum_default_axes_keepdims():
        keepdims = 1
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]).astype(np.uint32)
        y = np.sum(x, axis=None, keepdims=keepdims == 1)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "reduce_sum_default_axes_keepdims"
        make_test([x], y, "input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None)", name)

    @staticmethod
    def reduce_sum_negative_axes_keepdims():
        axes = np.array([-2], dtype=np.int64)
        keepdims = 1
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]).astype(np.uint32)
        y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "reduce_sum_negative_axes_keepdims"
        make_test([x], y, "input_0.reduce_sum(Option::Some(array![-2].span()), Option::Some(true), Option::None)", name)

    @staticmethod
    def reduce_sum_empty_axes_input_noop():
        x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]).astype(np.uint32)
        y = np.array(x)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "reduce_sum_empty_axes_input_noop"
        make_test([x], y, "input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true))", name)
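
# Illustrative sketch (not part of the original generator): a negative axis counts from
# the end of the shape, so axis -2 on a rank-3 tensor reduces axis 1 — the case
# exercised by reduce_sum_negative_axes_keepdims above.
def _sanity_check_negative_axis():
    a = np.arange(12, dtype=np.uint32).reshape(3, 2, 2)
    assert np.array_equal(np.sum(a, axis=-2, keepdims=True),
                          np.sum(a, axis=1, keepdims=True))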
https://github.com/gizatechxyz/orion
nodegen/node/reduce_sum_square.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_sum_square(RunAll):
    @staticmethod
    def reduce_sum_square_fp8x23():
        def reduce_sum_square_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=False).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_sum_square_fp8x23_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, false)", name)

        def reduce_sum_square_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_sum_square_fp8x23_export_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, true)", name)

        def reduce_sum_square_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
            name = "reduce_sum_square_fp8x23_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(0, true)", name)

        reduce_sum_square_export_do_not_keepdims()
        reduce_sum_square_export_keepdims()
        reduce_sum_square_axis_0()

    @staticmethod
    def reduce_sum_square_fp16x16():
        def reduce_sum_square_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=False).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_sum_square_fp16x16_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, false)", name)

        def reduce_sum_square_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_sum_square_fp16x16_export_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, true)", name)

        def reduce_sum_square_axis_0():
            shape = [2, 2, 2]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int64)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int64)
            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
            name = "reduce_sum_square_fp16x16_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(0, true)", name)

        reduce_sum_square_export_do_not_keepdims()
        reduce_sum_square_export_keepdims()
        reduce_sum_square_axis_0()

    @staticmethod
    def reduce_sum_square_i8():
        def reduce_sum_square_export_do_not_keepdims():
            shape = [2, 2, 2]
            axes = np.array([2], dtype=np.int8)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=False).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_sum_square_i8_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, false)", name)

        def reduce_sum_square_export_keepdims():
            shape = [2, 2, 2]
            axes = np.array([2], dtype=np.int8)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_sum_square_i8_export_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, true)", name)

        def reduce_sum_square_axis_0():
            shape = [2, 2, 2]
            axes = np.array([0], dtype=np.int8)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int8)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int8)
            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
            name = "reduce_sum_square_i8_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(0, true)", name)

        reduce_sum_square_export_do_not_keepdims()
        reduce_sum_square_export_keepdims()
        reduce_sum_square_axis_0()

    @staticmethod
    def reduce_sum_square_i32():
        def reduce_sum_square_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int32)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=False).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_sum_square_i32_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, false)", name)

        def reduce_sum_square_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_sum_square_i32_export_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, true)", name)

        def reduce_sum_square_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.int32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.int32)
            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())
            name = "reduce_sum_square_i32_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(0, true)", name)

        reduce_sum_square_export_do_not_keepdims()
        reduce_sum_square_export_keepdims()
        reduce_sum_square_axis_0()

    @staticmethod
    def reduce_sum_square_u32():
        def reduce_sum_square_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.uint32)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=False).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_sum_square_u32_export_do_not_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, false)", name)

        def reduce_sum_square_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.uint32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_sum_square_u32_export_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(2, true)", name)

        def reduce_sum_square_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.uint32)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape).astype(np.uint32)
            y = np.sum(a=np.square(x), axis=tuple(axes), keepdims=True).astype(np.uint32)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())
            name = "reduce_sum_square_u32_export_negative_axes_keepdims"
            make_test([x], y, "input_0.reduce_sum_square(0, true)", name)

        reduce_sum_square_export_do_not_keepdims()
        reduce_sum_square_export_keepdims()
        reduce_sum_square_axis_0()
https://github.com/gizatechxyz/orion
nodegen/node/relu.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, Trait, FixedImpl
import tensorflow as tf


class Relu(RunAll):
    @staticmethod
    def relu_i32():
        x = np.random.randint(-5, 9, (2, 2)).astype(np.int32)
        layer = tf.keras.layers.ReLU()
        y = layer(x).numpy()

        x = Tensor(Dtype.I32, x.shape, x.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "relu_i32"
        make_test([x], y, "NNTrait::relu(@input_0)", name, Trait.NN)

    @staticmethod
    def relu_i8():
        x = np.random.randint(-5, 9, (2, 2)).astype(np.int8)
        layer = tf.keras.layers.ReLU()
        y = layer(x).numpy()

        x = Tensor(Dtype.I8, x.shape, x.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())

        name = "relu_i8"
        make_test([x], y, "NNTrait::relu(@input_0)", name, Trait.NN)

    @staticmethod
    def relu_fp8x23():
        x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        layer = tf.keras.layers.ReLU()
        y = layer(x).numpy()

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "relu_fp8x23"
        make_test([x], y, "NNTrait::relu(@input_0)", name, Trait.NN)

    @staticmethod
    def relu_fp16x16():
        x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
        layer = tf.keras.layers.ReLU()
        y = layer(x).numpy()

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "relu_fp16x16"
        make_test([x], y, "NNTrait::relu(@input_0)", name, Trait.NN)
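
# Illustrative sketch (not part of the original generator): tf.keras.layers.ReLU()
# computes max(x, 0) elementwise, so the expected outputs could equivalently be
# produced with plain NumPy.
def _sanity_check_relu():
    a = np.array([[-2, 3], [0, -1]], dtype=np.int32)
    assert np.array_equal(np.maximum(a, 0), np.array([[0, 3], [0, 0]]))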
https://github.com/gizatechxyz/orion
nodegen/node/reshape.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, Tensor, Dtype

original_shape = [2, 3, 4]
# Note: casting uniform [0, 1) samples to int32 truncates every entry to 0.
data = np.random.random_sample(original_shape).astype(np.int32)


def reshape_reference_implementation(
    data: np.ndarray, shape: np.ndarray, allowzero: int = 0
) -> np.ndarray:
    # Replace zeros with the corresponding dim size.
    # We need to do this because np.reshape doesn't support 0 unless 'allowzero' is set.
    new_shape = np.copy(shape)
    if allowzero == 0:
        zeros_index = np.where(shape == 0)
        new_shape[zeros_index] = np.array(data.shape)[zeros_index]
    reshaped = np.reshape(data, new_shape)
    return reshaped


class Reshape(RunAll):
    @staticmethod
    def reshape_reordered_all_dims():
        y = reshape_reference_implementation(
            data, np.array([4, 2, 3], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_reordered_all_dims"
        make_test([x], y, "input_0.reshape(array![4,2,3].span(), false)", name)

    @staticmethod
    def reshape_reordered_last_dims():
        y = reshape_reference_implementation(
            data, np.array([2, 4, 3], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_reordered_last_dims"
        make_test([x], y, "input_0.reshape(array![2,4,3].span(), false)", name)

    @staticmethod
    def reshape_reduced_dims():
        y = reshape_reference_implementation(
            data, np.array([2, 12], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_reduced_dims"
        make_test([x], y, "input_0.reshape(array![2,12].span(), false)", name)

    @staticmethod
    def reshape_extended_dims():
        y = reshape_reference_implementation(
            data, np.array([2, 3, 2, 2], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_extended_dims"
        make_test([x], y, "input_0.reshape(array![2, 3, 2, 2].span(), false)", name)

    @staticmethod
    def reshape_one_dim():
        y = reshape_reference_implementation(
            data, np.array([24], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_one_dim"
        make_test([x], y, "input_0.reshape(array![24].span(), false)", name)

    @staticmethod
    def reshape_negative_dim():
        y = reshape_reference_implementation(
            data, np.array([2, -1, 2], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_negative_dim"
        make_test([x], y, "input_0.reshape(array![2, -1, 2].span(), false)", name)

    @staticmethod
    def reshape_negative_extended_dims():
        y = reshape_reference_implementation(
            data, np.array([-1, 2, 3, 4], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_negative_extended_dims"
        make_test([x], y, "input_0.reshape(array![-1, 2, 3, 4].span(), false)", name)

    @staticmethod
    def reshape_zero_dim():
        y = reshape_reference_implementation(
            data, np.array([2, 0, 4, 1], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_zero_dim"
        make_test([x], y, "input_0.reshape(array![2, 0, 4, 1].span(), false)", name)

    @staticmethod
    def reshape_zero_and_negative_dim():
        y = reshape_reference_implementation(
            data, np.array([2, 0, 1, -1], dtype=np.int64))
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_zero_and_negative_dim"
        make_test([x], y, "input_0.reshape(array![2, 0, 1, -1].span(), false)", name)

    @staticmethod
    def reshape_allowzero_reordered():
        # This case was originally defined under the same name as the previous test,
        # which would have shadowed it; it exercises allowzero=1 on an empty axis.
        original_shape = [0, 3, 4]
        data = np.random.random_sample(original_shape).astype(np.int32)
        y = reshape_reference_implementation(
            data, np.array([3, 4, 0], dtype=np.int64), allowzero=1)
        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())
        name = "reshape_allowzero_reordered"
        make_test([x], y, "input_0.reshape(array![3, 4, 0].span(), true)", name)
https://github.com/gizatechxyz/orion
nodegen/node/resize.py
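# Illustrative note (not part of the original file): the default "half_pixel"
# coordinate_transformation_mode used by the reference implementation below maps an
# output index x to the input coordinate x_ori = (x + 0.5) / scale - 0.5; for example,
# upsampling by scale 2.0 sends output pixel 1 to input coordinate 0.25, a quarter of
# the way between input pixels 0 and 1.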
# Python test implementation from ONNX library : https://github.com/onnx/onnx/blob/main/onnx/reference/ops/op_resize.py
import numpy as np
from typing import Any, Callable
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def _cartesian(arrays: list[np.ndarray], out: np.ndarray | None = None) -> np.ndarray:
    # From https://stackoverflow.com/a/1235363
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype

    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)

    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        _cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m : (j + 1) * m, 1:] = out[0:m, 1:]
    return out


def _get_neighbor_idxes(x: float, n: int, limit: int) -> np.ndarray:
    idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]
    idxes = sorted(idxes)
    return np.array(idxes)


def _get_neighbor(x: float, n: int, data: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    pad_width = np.ceil(n / 2).astype(int)
    padded = np.pad(data, pad_width, mode="edge")
    x += pad_width

    idxes = _get_neighbor_idxes(x, n, len(padded))
    ret = padded[idxes]
    return idxes - pad_width, ret


def linear_coeffs(ratio: float, scale: float | None = None) -> np.ndarray:
    del scale
    return np.array([1 - ratio, ratio])


def linear_coeffs_antialias(ratio: float, scale: float) -> np.ndarray:
    scale = min(scale, 1.0)
    start = int(np.floor(-1 / scale) + 1)
    footprint = 2 - 2 * start
    args = (np.arange(start, start + footprint) - ratio) * scale
    coeffs = np.clip(1 - np.abs(args), 0, 1)
    return np.array(coeffs) / sum(coeffs)


def cubic_coeffs_antialias(ratio: float, scale: float, A: float = -0.75) -> np.ndarray:
    scale = min(scale, 1.0)

    def compute_coeff(x: float) -> float:
        x = abs(x)
        x_2 = x * x
        x_3 = x * x_2
        if x <= 1:
            return (A + 2) * x_3 - (A + 3) * x_2 + 1
        if x < 2:
            return A * x_3 - 5 * A * x_2 + 8 * A * x - 4 * A
        return 0.0

    i_start = int(np.floor(-2 / scale) + 1)
    i_end = 2 - i_start
    args = [scale * (i - ratio) for i in range(i_start, i_end)]
    coeffs = [compute_coeff(x) for x in args]
    return np.array(coeffs) / sum(coeffs)


def nearest_coeffs(
    ratio: float | int | np.ndarray, mode: str = "round_prefer_floor"
) -> np.ndarray:
    if isinstance(ratio, int) or ratio.is_integer():
        return np.array([0, 1])
    if mode == "round_prefer_floor":
        return np.array([ratio <= 0.5, ratio > 0.5])
    if mode == "round_prefer_ceil":
        return np.array([ratio < 0.5, ratio >= 0.5])
    if mode == "floor":
        return np.array([1, 0])
    if mode == "ceil":
        return np.array([0, 1])
    raise ValueError(f"Unexpected value {mode!r}.")


def _interpolate_1d_with_x(
    data: np.ndarray,
    scale_factor: float,
    output_width_int: int,
    x: float,
    get_coeffs: Callable[[float, float], np.ndarray],
    roi: np.ndarray | None = None,
    extrapolation_value: float = 0.0,
    coordinate_transformation_mode: str = "half_pixel",
    exclude_outside: bool = False,
) -> np.ndarray:
    input_width = len(data)
    output_width = scale_factor * input_width
    if coordinate_transformation_mode == "align_corners":
        if output_width == 1:
            x_ori = 0.0
        else:
            x_ori = x * (input_width - 1) / (output_width - 1)
    elif coordinate_transformation_mode == "asymmetric":
        x_ori = x / scale_factor
    elif coordinate_transformation_mode == "tf_crop_and_resize":
        if roi is None:
            raise ValueError("roi cannot be None.")
        if output_width == 1:
            x_ori = (roi[1] - roi[0]) * (input_width - 1) / 2
        else:
            x_ori = x * (roi[1] - roi[0]) * (input_width - 1) / (output_width - 1)
        x_ori += roi[0] * (input_width - 1)
        # Out-of-bounds coordinates yield the extrapolation value directly.
        if x_ori < 0 or x_ori > input_width - 1:
            return np.array(extrapolation_value)
    elif coordinate_transformation_mode == "pytorch_half_pixel":
        if output_width == 1:
            x_ori = -0.5
        else:
            x_ori = (x + 0.5) / scale_factor - 0.5
    elif coordinate_transformation_mode == "half_pixel":
        x_ori = (x + 0.5) / scale_factor - 0.5
    elif coordinate_transformation_mode == "half_pixel_symmetric":
        adjustment = output_width_int / output_width
        center = input_width / 2
        offset = center * (1 - adjustment)
        x_ori = offset + (x + 0.5) / scale_factor - 0.5
    else:
        raise ValueError(
            f"Invalid coordinate_transformation_mode: {coordinate_transformation_mode!r}."
        )

    x_ori_int = np.floor(x_ori).astype(int).item()

    if x_ori.is_integer():
        ratio = 1
    else:
        ratio = x_ori - x_ori_int

    coeffs = get_coeffs(ratio, scale_factor)
    n = len(coeffs)

    idxes, points = _get_neighbor(x_ori, n, data)

    if exclude_outside:
        for i, idx in enumerate(idxes):
            if idx < 0 or idx >= input_width:
                coeffs[i] = 0
        coeffs /= sum(coeffs)

    return np.dot(coeffs, points).item()


def _interpolate_nd_with_x(
    data: np.ndarray,
    n: int,
    scale_factors: list[float],
    output_size: list[int],
    x: list[float],
    get_coeffs: Callable[[float, float], np.ndarray],
    roi: np.ndarray | None = None,
    exclude_outside: bool = False,
    **kwargs: Any,
) -> np.ndarray:
    if n == 1:
        return _interpolate_1d_with_x(
            data,
            scale_factors[0],
            output_size[0],
            x[0],
            get_coeffs,
            roi=roi,
            exclude_outside=exclude_outside,
            **kwargs,
        )
    res1d = []
    for i in range(data.shape[0]):
        r = _interpolate_nd_with_x(
            data[i],
            n - 1,
            scale_factors[1:],
            output_size[1:],
            x[1:],
            get_coeffs,
            roi=None if roi is None else np.concatenate([roi[1:n], roi[n + 1 :]]),
            exclude_outside=exclude_outside,
            **kwargs,
        )
        res1d.append(r)

    return _interpolate_1d_with_x(
        res1d,
        scale_factors[0],
        output_size[0],
        x[0],
        get_coeffs,
        roi=None if roi is None else [roi[0], roi[n]],
        exclude_outside=exclude_outside,
        **kwargs,
    )


def _get_all_coords(data: np.ndarray) -> np.ndarray:
    return _cartesian([list(range(data.shape[i])) for i in range(len(data.shape))])


def interpolate_nd(
    data: np.ndarray,
    get_coeffs: Callable[[float, float], np.ndarray],
    output_size: list[int] | None = None,
    scale_factors: list[float] | None = None,
    axes: list[int] | None = None,
    roi: np.ndarray | None = None,
    keep_aspect_ratio_policy: str | None = "stretch",
    exclude_outside: bool = False,
    **kwargs: Any,
) -> np.ndarray:
    if output_size is None and scale_factors is None:
        raise ValueError("output_size is None and scale_factors is None.")

    r = len(data.shape)
    if axes is not None:
        if scale_factors is not None:
            new_scale_factors = [1.0] * r
            for i, d in enumerate(axes):
                new_scale_factors[d] = scale_factors[i]
            scale_factors = new_scale_factors

        if output_size is not None:
            new_output_size = [data.shape[i] for i in range(r)]
            for i, d in enumerate(axes):
                new_output_size[d] = output_size[i]
            output_size = new_output_size

        if roi is not None:
            new_roi = ([0.0] * r) + ([1.0] * r)
            naxes = len(axes)
            for i, d in enumerate(axes):
                new_roi[d] = roi[i]
                new_roi[r + d] = roi[naxes + i]
            roi = new_roi
    else:
        axes = list(range(r))

    if output_size is not None:
        scale_factors = [output_size[i] / data.shape[i] for i in range(r)]
        if keep_aspect_ratio_policy != "stretch":
            if keep_aspect_ratio_policy == "not_larger":
                scale = np.array(scale_factors)[axes].min()
            elif keep_aspect_ratio_policy == "not_smaller":
                scale = np.array(scale_factors)[axes].max()
            else:
                raise ValueError(
                    f"Invalid keep_aspect_ratio_policy={keep_aspect_ratio_policy!r}"
                )

            scale_factors = [scale if i in axes else 1.0 for i in range(r)]

            def round_half_up(x: float) -> int:
                return int(x + 0.5)

            output_size = [
                round_half_up(scale * data.shape[i]) if i in axes else data.shape[i]
                for i in range(r)
            ]
    else:
        output_size = (scale_factors * np.array(data.shape)).astype(int)

    if scale_factors is None:
        raise ValueError("scale_factors is None.")
    if output_size is None:
        raise ValueError("output_size is None.")

    ret = np.zeros(output_size)
    for x in _get_all_coords(ret):
        ret[tuple(x)] = _interpolate_nd_with_x(
            data,
            len(data.shape),
            scale_factors,
            output_size,
            x,
            get_coeffs,
            roi=roi,
            exclude_outside=exclude_outside,
            **kwargs,
        )
    return ret


def cubic_coeffs(
    ratio: float, scale: float | None = None, A: float = -0.75
) -> np.ndarray:
    del scale  # Unused
    coeffs = [
        ((A * (ratio + 1) - 5 * A) * (ratio + 1) + 8 * A) * (ratio + 1) - 4 * A,
        ((A + 2) * ratio - (A + 3)) * ratio * ratio + 1,
        ((A + 2) * (1 - ratio) - (A + 3)) * (1 - ratio) * (1 - ratio) + 1,
        ((A * ((1 - ratio) + 1) - 5 * A) * ((1 - ratio) + 1) + 8 * A) * ((1 - ratio) + 1) - 4 * A,
    ]
    return np.array(coeffs)


class Resize(RunAll):
    @staticmethod
    def resize_upsample_scales_nearest() -> None:
        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: nearest_coeffs(x), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_nearest"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_nearest() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: nearest_coeffs(x), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_nearest"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_sizes_nearest() -> None:
        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
        sizes = np.array([1, 1, 7, 8], dtype=np.int64)
        output = interpolate_nd(
            data, lambda x, _: nearest_coeffs(x), output_size=sizes
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_sizes_nearest"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_sizes_nearest() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], dtype=np.float32)
        sizes = np.array([1, 1, 1, 3], dtype=np.int64)
        output = interpolate_nd(
            data, lambda x, _: nearest_coeffs(x), output_size=sizes
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_sizes_nearest"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_linear() -> None:
        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: linear_coeffs(x, None), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_linear"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_linear_align_corners() -> None:
        data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x, None),
            scale_factors=scales,
            coordinate_transformation_mode="align_corners",
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_linear_align_corners"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_linear() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: linear_coeffs(x, None), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_linear"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_linear_align_corners() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x, None),
            scale_factors=scales,
            coordinate_transformation_mode="align_corners",
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_linear_align_corners"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_cubic() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: cubic_coeffs(x, None), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_cubic"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_cubic_align_corners() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: cubic_coeffs(x),
            scale_factors=scales,
            coordinate_transformation_mode="align_corners",
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_cubic_align_corners"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_cubic() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
        output = interpolate_nd(
            data, lambda x, _: cubic_coeffs(x), scale_factors=scales
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_cubic"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_cubic_align_corners() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: cubic_coeffs(x),
            scale_factors=scales,
            coordinate_transformation_mode="align_corners",
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_cubic_align_corners"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_sizes_cubic() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        sizes = np.array([1, 1, 9, 10], dtype=np.int64)
        output = interpolate_nd(
            data, lambda x, _: cubic_coeffs(x), output_size=sizes
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_sizes_cubic"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_sizes_cubic() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
        output = interpolate_nd(
            data, lambda x, _: cubic_coeffs(x), output_size=sizes
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_sizes_cubic"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_cubic_A_n0p5_exclude_outside() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: cubic_coeffs(x, A=-0.5),
            scale_factors=scales,
            exclude_outside=True,
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_cubic_A_n0p5_exclude_outside"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::Some(FixedTrait::<FP16x16>::new(32768, true)),Option::Some(true),Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_downsample_scales_cubic_A_n0p5_exclude_outside() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: cubic_coeffs(x, A=-0.5),
            scale_factors=scales,
            exclude_outside=True,
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_scales_cubic_A_n0p5_exclude_outside"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::None,Option::Some(FixedTrait::<FP16x16>::new(32768, true)),Option::Some(true),Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_scales_cubic_asymmetric() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
        output = interpolate_nd(
            data,
            lambda x, _: cubic_coeffs(x, A=-0.75),
            scale_factors=scales,
            coordinate_transformation_mode="asymmetric",
        ).astype(np.float32)

        x = [data, scales]
        y = output
        for i in range(len(x)):
            x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_scales_cubic_asymmetric"
        func_sig = "data.resize(Option::None,scales,Option::None,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ASYMMETRIC),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::CUBIC),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_tf_crop_and_resize() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        roi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)
        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x),
            output_size=sizes,
            roi=roi,
            coordinate_transformation_mode="tf_crop_and_resize",
        ).astype(np.float32)

        x = [data, sizes, roi]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_tf_crop_and_resize"
        func_sig = "data.resize(roi,Option::None,sizes,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1], x[2]], y, func_sig, name)

    @staticmethod
    def resize_tf_crop_and_resize_extrapolation_value() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        roi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)
        sizes = np.array([1, 1, 3, 3], dtype=np.int64)
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x),
            output_size=sizes,
            roi=roi,
            coordinate_transformation_mode="tf_crop_and_resize",
            extrapolation_value=10.0,
        ).astype(np.float32)

        x = [data, sizes, roi]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_tf_crop_and_resize_extrapolation_value"
        func_sig = "data.resize(roi,Option::None,sizes,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),Option::None,Option::None,Option::Some(FixedTrait::<FP16x16>::new(655360, false)),Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1], x[2]], y, func_sig, name)

    @staticmethod
    def resize_downsample_sizes_linear_pytorch_half_pixel() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        sizes = np.array([1, 1, 3, 1], dtype=np.int64)
        output = interpolate_nd(
            data,
            lambda x, _: linear_coeffs(x),
            output_size=sizes,
            coordinate_transformation_mode="pytorch_half_pixel",
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_downsample_sizes_linear_pytorch_half_pixel"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::PYTORCH_HALF_PIXEL),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::LINEAR),Option::None,)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_sizes_nearest_floor_align_corners() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        sizes = np.array([1, 1, 8, 8], dtype=np.int64)
        output = interpolate_nd(
            data,
            lambda x, _: nearest_coeffs(x, mode="floor"),
            output_size=sizes,
            coordinate_transformation_mode="align_corners",
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_sizes_nearest_floor_align_corners"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::Some(NEAREST_MODE::FLOOR),)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric() -> None:
        data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]], dtype=np.float32)
        sizes = np.array([1, 1, 8, 8], dtype=np.int64)
        output = interpolate_nd(
            data,
            lambda x, _: nearest_coeffs(x, mode="round_prefer_ceil"),
            output_size=sizes,
            coordinate_transformation_mode="asymmetric",
        ).astype(np.float32)

        x = [data, sizes]
        y = output
        x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
        x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric"
        func_sig = "data.resize(Option::None,Option::None,sizes,Option::None,Option::None,Option::Some(TRANSFORMATION_MODE::ASYMMETRIC),Option::None,Option::None,Option::None,Option::None,Option::Some(MODE::NEAREST),Option::Some(NEAREST_MODE::ROUND_PREFER_CEIL),)"
        make_test([x[0], x[1]], y, func_sig, name)

    @staticmethod
    def
resize_upsample_sizes_nearest_ceil_half_pixel() -> None: data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) sizes = np.array([1, 1, 8, 8], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x, mode="ceil"), output_size=sizes ).astype(np.float32) x = [data, sizes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_sizes_nearest_ceil_half_pixel" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::Some(NEAREST_MODE::CEIL),)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_downsample_scales_linear_antialias() -> None: data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32) output = interpolate_nd( data, linear_coeffs_antialias, scale_factors=scales ).astype(np.float32) x = [data, scales] y = output for i in range(len(x)): x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_scales_linear_antialias" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," func_sig += "Option::None," func_sig += "Option::Some(1)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_downsample_sizes_linear_antialias() -> None: data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) sizes = np.array([1, 1, 3, 3], dtype=np.int64) output = interpolate_nd( data, linear_coeffs_antialias, output_size=sizes ).astype(np.float32) x = [data, sizes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_sizes_linear_antialias" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::Some(1)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_downsample_scales_cubic_antialias() -> None: data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32) output = interpolate_nd( data, cubic_coeffs_antialias, scale_factors=scales ).astype(np.float32) x = [data, scales] y = output for i in range(len(x)): x[i] =
Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_scales_cubic_antialias" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," func_sig += "Option::None," func_sig += "Option::Some(1)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::CUBIC)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_downsample_sizes_cubic_antialias() -> None: data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) sizes = np.array([1, 1, 3, 3], dtype=np.int64) output = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype( np.float32 ) x = [data, sizes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_sizes_cubic_antialias" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::Some(1)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::CUBIC)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_upsample_scales_nearest_axes_2_3() -> None: axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2], [3, 4], ] ] ], dtype=np.float32, ) scales = np.array([2.0, 3.0], dtype=np.float32) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes ).astype(np.float32) x = [data, scales, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.FP16x16, x[1].shape, to_fp(x[1].flatten(), FixedImpl.FP16x16)) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_scales_nearest_axes_2_3" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," func_sig += "Option::None," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_upsample_scales_nearest_axes_3_2() -> None: axes = np.array([3, 2], dtype=np.int64) data = np.array([[[[1, 2],[3, 4],]]],dtype=np.float32,) scales = np.array([3.0, 2.0], dtype=np.float32) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes ).astype(np.float32) x = [data, scales, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.FP16x16, x[1].shape, to_fp(x[1].flatten(), FixedImpl.FP16x16)) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_scales_nearest_axes_3_2" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," 
func_sig += "Option::None," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_upsample_sizes_nearest_axes_2_3() -> None: data = np.array( [ [ [ [1, 2], [3, 4], ] ] ], dtype=np.float32, ) sizes = np.array([7, 8], dtype=np.int64) axes = np.array([2, 3], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_sizes_nearest_axes_2_3" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_upsample_sizes_nearest_axes_3_2() -> None: data = np.array( [ [ [ [1, 2], [3, 4], ] ] ], dtype=np.float32, ) sizes = np.array([8, 7], dtype=np.int64) axes = np.array([3, 2], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_sizes_nearest_axes_3_2" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_tf_crop_and_resize_axes_2_3() -> None: axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) roi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32) sizes = np.array([3, 3], dtype=np.int64) output = interpolate_nd( data, lambda x, _: linear_coeffs(x), output_size=sizes, roi=roi, axes=axes, coordinate_transformation_mode="tf_crop_and_resize", ).astype(np.float32) x = [data, sizes, roi, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16)) x[3] = Tensor(Dtype.U32, x[3].shape, x[3].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_tf_crop_and_resize_axes_2_3" func_sig = "data.resize(" func_sig += "roi," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," 
func_sig += "axes," func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2], x[3]], y, func_sig, name) @staticmethod def resize_tf_crop_and_resize_axes_3_2() -> None: axes = np.array([3, 2], dtype=np.int64) data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], ] ] ], dtype=np.float32, ) roi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32) sizes = np.array([3, 3], dtype=np.int64) output = interpolate_nd( data, lambda x, _: linear_coeffs(x), output_size=sizes, roi=roi, axes=axes, coordinate_transformation_mode="tf_crop_and_resize", ).astype(np.float32) x = [data, sizes, roi, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16)) x[3] = Tensor(Dtype.U32, x[3].shape, x[3].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_tf_crop_and_resize_axes_3_2" func_sig = "data.resize(" func_sig += "roi," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2], x[3]], y, func_sig, name) @staticmethod def resize_upsample_sizes_nearest_not_larger() -> None: keep_aspect_ratio_policy = "not_larger" axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2], [3, 4], ] ] ], dtype=np.float32, ) sizes = np.array([7, 8], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes, keep_aspect_ratio_policy=keep_aspect_ratio_policy, ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_sizes_nearest_not_larger" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_LARGER)," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_upsample_sizes_nearest_not_smaller() -> None: keep_aspect_ratio_policy = "not_smaller" axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2], [3, 4], ] ] ], dtype=np.float32, ) sizes = np.array([7, 8], dtype=np.int64) # Results in 8x8 output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes, keep_aspect_ratio_policy=keep_aspect_ratio_policy, ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, 
x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_sizes_nearest_not_smaller" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER)," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_downsample_sizes_nearest_not_larger() -> None: keep_aspect_ratio_policy = "not_larger" axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], ] ] ], dtype=np.float32, ) sizes = np.array([1, 3], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes, keep_aspect_ratio_policy=keep_aspect_ratio_policy, ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_sizes_nearest_not_larger" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_LARGER)," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_downsample_sizes_nearest_not_smaller() -> None: keep_aspect_ratio_policy = "not_smaller" axes = np.array([2, 3], dtype=np.int64) data = np.array( [ [ [ [1, 2, 3, 4], [5, 6, 7, 8], ] ] ], dtype=np.float32, ) sizes = np.array([1, 3], dtype=np.int64) output = interpolate_nd( data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes, keep_aspect_ratio_policy=keep_aspect_ratio_policy, ).astype(np.float32) x = [data, sizes, axes] y = output x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16)) x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten()) x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_sizes_nearest_not_smaller" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "Option::None," func_sig += "sizes," func_sig += "Option::None," func_sig += "axes," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER)," func_sig += "Option::Some(MODE::NEAREST)," func_sig += "Option::None,)" make_test([x[0], x[1], x[2]], y, func_sig, name) @staticmethod def resize_downsample_scales_linear_half_pixel_symmetric() -> None: data = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32) scales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32) output = interpolate_nd( data, lambda x, _: linear_coeffs(x), scale_factors=scales, coordinate_transformation_mode="half_pixel_symmetric", ).astype(np.float32) x = [data, scales] y = output for i in range(len(x)): x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), 
FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_downsample_scales_linear_half_pixel_symmetric" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name) @staticmethod def resize_upsample_scales_linear_half_pixel_symmetric() -> None: data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32) scales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32) output = interpolate_nd( data, lambda x, _: linear_coeffs(x), scale_factors=scales, coordinate_transformation_mode="half_pixel_symmetric", ).astype(np.float32) x = [data, scales] y = output for i in range(len(x)): x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "resize_upsample_scales_linear_half_pixel_symmetric" func_sig = "data.resize(" func_sig += "Option::None," func_sig += "scales," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC)," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::None," func_sig += "Option::Some(MODE::LINEAR)," func_sig += "Option::None,)" make_test([x[0], x[1]], y, func_sig, name)
https://github.com/gizatechxyz/orion
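Every test in resize.py assembles the same 12-argument `data.resize(...)` call string by hand. The positional order implied by those strings is: roi, scales, sizes, antialias, axes, coordinate_transformation_mode, cubic_coeff_a, exclude_outside, extrapolation_value, keep_aspect_ratio_policy, mode, nearest_mode. A minimal helper sketch (hypothetical, not part of nodegen) that rebuilds the same strings from keyword arguments:

def resize_sig(roi="Option::None", scales="Option::None", sizes="Option::None",
               antialias="Option::None", axes="Option::None",
               coordinate_transformation_mode="Option::None",
               cubic_coeff_a="Option::None", exclude_outside="Option::None",
               extrapolation_value="Option::None",
               keep_aspect_ratio_policy="Option::None",
               mode="Option::None", nearest_mode="Option::None"):
    # Argument order inferred from the hand-built func_sig strings above.
    args = (roi, scales, sizes, antialias, axes, coordinate_transformation_mode,
            cubic_coeff_a, exclude_outside, extrapolation_value,
            keep_aspect_ratio_policy, mode, nearest_mode)
    return "data.resize(" + ",".join(args) + ",)"

# e.g. resize_sig(scales="scales", mode="Option::Some(MODE::CUBIC)") reproduces
# the func_sig built in resize_downsample_scales_cubic.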
nodegen/node/reverse_sequence.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Reverse_sequence(RunAll): @staticmethod def Reverse_sequence_u32(): def reverse_sequence_u32_4x4_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.uint32).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.uint32).reshape((4, 4)) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_u32_4x4_batch" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_u32_4x4_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_u32_4x4_time" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name ) def reverse_sequence_u32_3x3_batch(): x = np.array([0,1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(3,3) y = np.array([2,1,0,3,4,5,7,6,8], dtype=np.uint32).reshape(3,3) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_u32_3x3_batch" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![3,1,2].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_u32_3x3_time(): x = np.array([0,1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(3,3) y = np.array([0,7,8,3,4,5,6,1,2], dtype=np.uint32).reshape(3,3) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_u32_3x3_time" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![1,3,3].span()), Option::Some(1), Option::Some(0))", name ) reverse_sequence_u32_4x4_batch() reverse_sequence_u32_4x4_time() reverse_sequence_u32_3x3_batch() reverse_sequence_u32_3x3_time() @staticmethod def Reverse_sequence_i32(): def reverse_sequence_i32_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int32).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int32).reshape((4, 4)) _x = Tensor(Dtype.I32, x.shape, x.flatten()) _y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "reverse_sequence_i32_batch_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_i32_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int32).reshape((4, 4)) y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int32).reshape((4, 4)) _x = Tensor(Dtype.I32, x.shape, x.flatten()) _y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "reverse_sequence_i32_time_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name ) reverse_sequence_i32_batch() reverse_sequence_i32_time() @staticmethod def Reverse_sequence_i8(): def reverse_sequence_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int8).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int8).reshape((4, 4)) _x = Tensor(Dtype.I8, x.shape, x.flatten()) _y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "reverse_sequence_i8_batch_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int8).reshape((4, 4)) y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int8).reshape((4, 4)) _x = Tensor(Dtype.I8, x.shape, x.flatten()) _y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "reverse_sequence_i8_time_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name ) reverse_sequence_batch() reverse_sequence_time() @staticmethod def Reverse_sequence_fp16x16(): def reverse_sequence_batch(): x = to_fp(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) y = to_fp(np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) name = "reverse_sequence_fp16x16_batch_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_time(): x = to_fp(np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) y = to_fp(np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) name = "reverse_sequence_fp16x16_time_equal_parts" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name ) reverse_sequence_batch() reverse_sequence_time() @staticmethod def reverse_sequence_different_dimensions(): def reverse_sequence_different_dimensions_4_5(): x = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], dtype=np.uint32).reshape(4,5) y = np.array([5,4,3,2,1,9,8,7,6,10,13,12,11,14,15,17,16,18,19,20], dtype=np.uint32).reshape(4,5) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_different_dimensions_4_5" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![5,4,3,2].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_different_dimensions_2_4(): x = np.array([1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(2,4) y = np.array([5,6,7,8,1,2,3,4], dtype=np.uint32).reshape(2,4) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_different_dimensions_2_4" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![4].span(), array![2,2,2,2].span()), Option::Some(1), Option::Some(0))", name ) def reverse_sequence_different_dimensions_1_6(): x = np.array([0,1,2,3,4,5], dtype=np.uint32).reshape(1,6) y = np.array([4,3,2,1,0,5], dtype=np.uint32).reshape(1,6) _x = Tensor(Dtype.U32, x.shape,
x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_different_dimensions_1_6" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![1].span(), array![5].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_different_dimensions_3x9_batch(): x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26], dtype=np.uint32).reshape(3,9) y = np.array([6,5,4,3,2,1,0,7,8,16,15,14,13,12,11,10,9,17,26,25,24,23,22,21,20,19,18], dtype=np.uint32).reshape(3,9) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_different_dimensions_3x9_batch" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![3].span(), array![7,8,9].span()), Option::Some(0), Option::Some(1))", name ) def reverse_sequence_different_dimensions_3x9_time(): x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26], dtype=np.uint32).reshape(3,9) y = np.array([18,10,20,12,22,14,24,16,8,9,1,11,3,13,5,15,7,17,0,19,2,21,4,23,6,25,26], dtype=np.uint32).reshape(3,9) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "reverse_sequence_different_dimensions_3x9_time" make_test( [_x], _y, "input_0.reverse_sequence(TensorTrait::<usize>::new(array![9].span(), array![3,2,3,2,3,2,3,2,1].span()), Option::Some(1), Option::Some(0))", name ) reverse_sequence_different_dimensions_4_5() reverse_sequence_different_dimensions_2_4() reverse_sequence_different_dimensions_1_6() reverse_sequence_different_dimensions_3x9_batch() reverse_sequence_different_dimensions_3x9_time()
https://github.com/gizatechxyz/orion
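The expected arrays in reverse_sequence.py encode ONNX ReverseSequence semantics: along time_axis, the first sequence_lens[i] entries of batch slice i are reversed, and the remaining entries are left in place. A minimal numpy sketch of that rule for the 2-D fixtures used here (reverse_sequence_ref is a hypothetical helper, not part of the suite):

import numpy as np

def reverse_sequence_ref(x, sequence_lens, batch_axis=0, time_axis=1):
    # Reverse the first `length` time steps of each batch slice.
    y = np.copy(x)
    for i, length in enumerate(sequence_lens):
        if batch_axis == 0:  # rows are batches, time runs along columns
            y[i, :length] = x[i, :length][::-1]
        else:                # columns are batches, time runs along rows
            y[:length, i] = x[:length, i][::-1]
    return y

# Reproduces the reverse_sequence_u32_4x4_batch fixture:
x = np.arange(16, dtype=np.uint32).reshape(4, 4)
expected = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12],
                    dtype=np.uint32).reshape(4, 4)
assert np.array_equal(reverse_sequence_ref(x, [1, 2, 3, 4]), expected)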
nodegen/node/round.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl class Round(RunAll): @staticmethod def round_fp8x23(): x = np.array([0.1, 0.5, 0.9, 1.2, 1.5, 1.8, 2.3, 2.5, 2.7, -1.1, -1.5, -1.9, -2.2, -2.5, -2.8]).astype(np.float64) y = np.array([0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, -1.0, -2.0, -2.0, -2.0, -3.0, -3.0]).astype(np.float64) x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "round_fp8x23" make_test([x], y, "input_0.round()", name) @staticmethod def round_fp16x16(): x = np.array([0.1, 0.5, 0.9, 1.2, 1.5, 1.8, 2.3, 2.5, 2.7, -1.1, -1.5, -1.9, -2.2, -2.5, -2.8]).astype(np.float64) y = np.array([0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, -1.0, -2.0, -2.0, -2.0, -3.0, -3.0]).astype(np.float64) x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) name = "round_fp16x16" make_test([x], y, "input_0.round()", name)
https://github.com/gizatechxyz/orion
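The round.py fixtures follow round-half-away-from-zero (0.5 -> 1.0, -2.5 -> -3.0), which differs from numpy's default round-half-to-even (np.round(2.5) == 2.0). A quick sketch that reproduces the expected values under that assumption:

import numpy as np

# Round half away from zero; np.round would disagree at 0.5, 2.5 and -2.5.
half_away = lambda v: np.sign(v) * np.floor(np.abs(v) + 0.5)
x = np.array([0.1, 0.5, 0.9, 1.2, 1.5, 1.8, 2.3, 2.5, 2.7,
              -1.1, -1.5, -1.9, -2.2, -2.5, -2.8])
y = np.array([0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0,
              -1.0, -2.0, -2.0, -2.0, -3.0, -3.0])
assert np.array_equal(half_away(x), y)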
nodegen/node/running.py
import os # Directory path where Python files/modules are located directory_path = 'nodegen/node/' # Get all files in the directory all_files = os.listdir(directory_path) # Keep the module name of every Python file (strip the '.py' suffix) python_files = [file[:-3] for file in all_files if file.endswith('.py')] fixed = [ 'abs', 'argmax', 'argmin', 'concat', 'cumsum', 'div', 'equal', 'less_equal', 'greater', 'linear', 'matmul', 'mul', 'or', 'reduce_sum', 'sub', 'transpose', 'xor', 'less', 'greater_equal', 'slice', 'gather', 'nonzero', 'squeeze', 'unsqueeze', 'sign', 'clip', '__init__', 'running' ] for node in python_files: if node not in fixed: command = f'python nodegen/node/__init__.py {node}' os.system(command)
https://github.com/gizatechxyz/orion
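running.py invokes nodegen once per module that is not in the fixed list via os.system, which silently discards a non-zero exit status. A subprocess-based variant (a sketch, not the script's actual behaviour; it assumes python_files and fixed as defined above) that at least reports failures:

import subprocess
import sys

for node in python_files:
    if node in fixed:
        continue
    # Run the same per-module command, but surface the exit code.
    result = subprocess.run([sys.executable, 'nodegen/node/__init__.py', node])
    if result.returncode != 0:
        print(f'nodegen failed for {node}', file=sys.stderr)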
nodegen/node/scatter.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait # The below ScatterElements' numpy implementation is from https://stackoverflow.com/a/46204790/11767360 def scatter_elements(data, indices, updates, axis=0, reduction="none"): # type: ignore if axis < 0: axis = data.ndim + axis idx_xsection_shape = indices.shape[:axis] + indices.shape[axis + 1 :] def make_slice(arr, axis, i): # type: ignore slc = [slice(None)] * arr.ndim slc[axis] = i return slc def unpack(packed): # type: ignore unpacked = packed[0] for i in range(1, len(packed)): unpacked = unpacked, packed[i] return unpacked def make_indices_for_duplicate(idx): # type: ignore final_idx = [] for i in range(len(idx[0])): final_idx.append(tuple(idx_element[i] for idx_element in idx)) return list(final_idx) # We use indices and axis parameters to create idx # idx is in a form that can be used as a NumPy advanced indices for scattering of updates param. in data idx = [ [ unpack(np.indices(idx_xsection_shape).reshape(indices.ndim - 1, -1)), indices[tuple(make_slice(indices, axis, i))].reshape(1, -1)[0], ] for i in range(indices.shape[axis]) ] idx = list(np.concatenate(idx, axis=1)) idx.insert(axis, idx.pop()) # updates_idx is a NumPy advanced indices for indexing of elements in the updates updates_idx = list(idx) updates_idx.pop(axis) updates_idx.insert( axis, np.repeat(np.arange(indices.shape[axis]), np.prod(idx_xsection_shape)) ) scattered = np.copy(data) if reduction == "none": scattered[tuple(idx)] = updates[tuple(updates_idx)] else: idx, updates_idx = make_indices_for_duplicate(idx), make_indices_for_duplicate( updates_idx ) for iter, idx_set in enumerate(idx): if reduction == "add": scattered[idx_set] += updates[updates_idx[iter]] elif reduction == "mul": scattered[idx_set] *= updates[updates_idx[iter]] elif reduction == "max": scattered[idx_set] = np.maximum( scattered[idx_set], updates[updates_idx[iter]] ) elif reduction == "min": scattered[idx_set] = np.minimum( scattered[idx_set], updates[updates_idx[iter]] ) return scattered class Scatter(RunAll): @staticmethod def scatter_fp16x16(): def scatter(): def default(): x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'none') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(x2.flatten(), FixedImpl.FP16x16)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_fp16x16_3d_default" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))", name= name) def axis_1(): x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'none') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(x2.flatten(), FixedImpl.FP16x16)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_fp16x16_3d_axis1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))", name= name) def axis_1_add(): 
x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'add') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(x2.flatten(), FixedImpl.FP16x16)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_fp16x16_3d_axis1_add" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('add'))", name= name) default() axis_1() axis_1_add() scatter() @staticmethod def scatter_fp8x23(): def scatter(): def default(): x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'none') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(x2.flatten(), FixedImpl.FP8x23)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "scatter_fp8x23_default" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))", name= name) def axis1(): x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'none') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(x2.flatten(), FixedImpl.FP8x23)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "scatter_fp8x23_axis1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))", name= name) def axis1_mul(): x1 = np.zeros((3, 3)).astype(np.int64) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int64) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'mul') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(x2.flatten(), FixedImpl.FP8x23)) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) name = "scatter_fp8x23_mul" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('mul'))", name= name) default() axis1() axis1_mul() scatter() @staticmethod def scatter_i8(): def scatter_3D(): def default(): x1 = np.zeros((3, 3)).astype(np.int8) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int8) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'none') x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I8, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "scatter_i8_default" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))", name= name) def axis1(): x1 = np.zeros((3, 3)).astype(np.int8) x2 = np.arange(1, 10).reshape((3, 
3)).astype(np.int8) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'none') x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I8, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "scatter_i8_axis1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))", name= name) def axis1_max(): x1 = np.zeros((3, 3)).astype(np.int8) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int8) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'max') x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I8, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) name = "scatter_i8_axis1_max" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('max'))", name= name) default() axis1() axis1_max() scatter_3D() @staticmethod def scatter_i32(): def scatter_3D(): def default(): x1 = np.zeros((3, 3)).astype(np.int32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'none') x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "scatter_i32_default" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))", name= name) def axis1(): x1 = np.zeros((3, 3)).astype(np.int32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'none') x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "scatter_i32_axis1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))", name= name) def axis_min(): x1 = np.zeros((3, 3)).astype(np.int32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'min') x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) name = "scatter_i32_axis1_min" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('min'))", name= name) default() axis1() axis_min() scatter_3D() @staticmethod def scatter_u32(): def scatter_3D(): def default(): x1 = np.zeros((3, 3)).astype(np.uint32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'none') x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "scatter_u32_default" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2,
axis:Option::Some(0), reduction:Option::Some('none'))", name= name) def axis1(): x1 = np.zeros((3, 3)).astype(np.uint32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 1, 'none') x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "scatter_u32_axis1" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))", name= name) def axis_add(): x1 = np.zeros((3, 3)).astype(np.uint32) x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32) x3 = np.array( [[0,1,2], [2,0,1], [1,0,1]], ) y = scatter_elements(x1, x3, x2, 0, 'add') x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) name = "scatter_u32_add" make_test( inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add'))", name= name) default() axis1() axis_add() scatter_3D()
https://github.com/gizatechxyz/orion
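Every scatter.py case reuses the same fixture (zeros data, updates 1..9, indices [[0,1,2],[2,0,1],[1,0,1]]). For axis=0 with reduction 'none', each write lands at (indices[i][j], j); where targets repeat, numpy's fancy-index assignment keeps the last write, so the i=2 row of updates overwrites earlier writes at (0,1) and (1,2). A sanity check, assuming the scatter_elements helper above is in scope:

import numpy as np

data = np.zeros((3, 3), dtype=np.int64)
updates = np.arange(1, 10).reshape(3, 3)
indices = np.array([[0, 1, 2], [2, 0, 1], [1, 0, 1]])
out = scatter_elements(data, indices, updates, axis=0)
# Duplicate targets (0,1) and (1,2) keep the later updates 8 and 9.
assert np.array_equal(out, np.array([[1, 8, 0], [7, 2, 9], [4, 0, 3]]))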
nodegen/node/scatter_nd.py
import numpy as np from nodegen.node import RunAll from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl def scatter_nd_impl(data, indices, updates, reduction="none"): # type: ignore # Check tensor shapes assert indices.shape[-1] <= len(data.shape) assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1] :] # Compute output output = np.copy(data) for i in np.ndindex(indices.shape[:-1]): # NOTE: The order of iteration in this loop is not specified. if reduction == "add": output[tuple(indices[i])] += updates[i] elif reduction == "mul": output[tuple(indices[i])] *= updates[i] elif reduction == "max": output[tuple(indices[i])] = np.maximum(output[tuple(indices[i])], updates[i]) elif reduction == "min": output[tuple(indices[i])] = np.minimum(output[tuple(indices[i])], updates[i]) else: output[tuple(indices[i])] = updates[i] return output data = np.array( [ [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], ], dtype=np.float32, ) indices = np.array([[0], [2]], dtype=np.int64) updates = np.array( [ [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], ], dtype=np.float32, ) class Scatter_nd(RunAll): @staticmethod def scatter_nd_fp16x16(): def scatter_nd_3D(): def default(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='none') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_nd_fp16x16_3d_default" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", name= name) def add(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='add') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_nd_fp16x16_3d_add" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", name= name) def mul(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='mul') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_nd_fp16x16_3d_mul" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", name= name) def max(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='max') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3
= Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_nd_fp16x16_3d_max" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", name= name) def min(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='min') x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) name = "scatter_nd_fp16x16_3d_min" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", name= name) default() add() mul() max() min() scatter_nd_3D() @staticmethod def scatter_nd_fp8x23(): def scatter_nd_3D(): def default(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='none') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "scatter_nd_fp8x23_3d_default" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", name= name) def add(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='add') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "scatter_nd_fp8x23_3d_add" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", name= name) def mul(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='mul') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "scatter_nd_fp8x23_3d_mul" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", name= name) def max(): x1 = data.astype(np.int64) x2 = indices.astype(np.int64) x3 = updates.astype(np.uint32) y = scatter_nd_impl(x1, x2, x3, reduction='max') x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) name = "scatter_nd_fp8x23_3d_max" make_test( inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", name= name) def min(): x1 = data.astype(np.int64) x2 = 
indices.astype(np.int64)
                x3 = updates.astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='min')

                x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
                y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

                name = "scatter_nd_fp8x23_3d_min"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", name=name)

            default()
            add()
            mul()
            max()
            min()

        scatter_nd_3D()

    @staticmethod
    def scatter_nd_u32():
        def scatter_nd_3D():
            def default():
                x1 = np.arange(0, 12).reshape((4, 3)).astype(np.int32)
                x2 = np.array([[0], [1]]).astype(np.uint32)
                x3 = np.random.randint(low=0, high=100, size=(2, 3)).astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='none')

                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "scatter_nd_u32_default"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", name=name)

            def add():
                x1 = np.arange(0, 12).reshape((4, 3)).astype(np.int32)
                x2 = np.array([[1], [0]]).astype(np.uint32)
                x3 = np.random.randint(low=0, high=100, size=(2, 3)).astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='add')

                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "scatter_nd_u32_add"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", name=name)

            def mul():
                x1 = np.arange(0, 12).reshape((4, 3)).astype(np.int32)
                x2 = np.array([[0], [1]]).astype(np.uint32)
                x3 = np.random.randint(low=0, high=100, size=(2, 3)).astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='mul')

                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "scatter_nd_u32_mul"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", name=name)

            def max():
                x1 = np.arange(0, 12).reshape((4, 3)).astype(np.int32)
                x2 = np.array([[0], [1]]).astype(np.uint32)
                x3 = np.random.randint(low=0, high=100, size=(2, 3)).astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='max')

                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "scatter_nd_u32_max"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", name=name)

            def min():
                x1 = np.arange(0, 12).reshape((4, 3)).astype(np.int32)
                x2 = np.array([[0], [1]]).astype(np.uint32)
                x3 = np.random.randint(low=0, high=100, size=(2, 3)).astype(np.uint32)
                y = scatter_nd_impl(x1, x2, x3, reduction='min')

                x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
                x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
                x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())

                name = "scatter_nd_u32_min"
                make_test(inputs=[x1, x3, x2], output=y, func_sig="input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", name=name)

            default()
            add()
            mul()
            max()
            min()

        scatter_nd_3D()
https://github.com/gizatechxyz/orion
nodegen/node/sequence_at.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait

scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())


class Sequence_at(RunAll):

    @staticmethod
    def sequence_at_u32():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            name = "sequence_at_u32_positive"
            make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            name = "sequence_at_u32_negative"
            make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        positive_position()
        negative_position()

    @staticmethod
    def sequence_at_i32():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            name = "sequence_at_i32_positive"
            make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            name = "sequence_at_i32_negative"
            make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        positive_position()
        negative_position()

    @staticmethod
    def sequence_at_i8():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            name = "sequence_at_i8_positive"
            make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            name = "sequence_at_i8_negative"
            make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        positive_position()
        negative_position()

    @staticmethod
    def sequence_at_fp8x23():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)
            position = scalar(2)

            name = "sequence_at_fp8x23_positive"
            make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)
            position = scalar(-2)

            name = "sequence_at_fp8x23_negative"
            make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        positive_position()
        negative_position()

    @staticmethod
    def sequence_at_fp16x16():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)
            position = scalar(2)

            name = "sequence_at_fp16x16_positive"
            make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)
            position = scalar(-2)

            name = "sequence_at_fp16x16_negative"
            make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
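The scalar position follows Python-style indexing, which is why the expected outputs above are simply sequence[2] and sequence[-2]; a one-line reminder of the convention:

seq = ["t0", "t1", "t2", "t3", "t4"]
assert seq[2] == "t2"    # positive position counts from the front
assert seq[-2] == "t3"   # negative position counts from the back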
https://github.com/gizatechxyz/orion
nodegen/node/sequence_construct.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


class Sequence_construct(RunAll):

    @staticmethod
    def sequence_construct_u32():
        sequence = []
        tensor_cnt = np.random.randint(1, 10)
        shape = np.random.randint(1, 4, 2)
        for _ in range(tensor_cnt):
            values = np.random.randint(0, 6, shape).astype(np.uint32)
            tensor = Tensor(Dtype.U32, values.shape, values.flatten())
            sequence.append(tensor)

        name = "sequence_construct_u32"
        make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)

    @staticmethod
    def sequence_construct_i32():
        sequence = []
        tensor_cnt = np.random.randint(1, 10)
        shape = np.random.randint(1, 4, 2)
        for _ in range(tensor_cnt):
            values = np.random.randint(-6, 6, shape).astype(np.int32)
            tensor = Tensor(Dtype.I32, values.shape, values.flatten())
            sequence.append(tensor)

        name = "sequence_construct_i32"
        make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)

    @staticmethod
    def sequence_construct_i8():
        sequence = []
        tensor_cnt = np.random.randint(1, 10)
        shape = np.random.randint(1, 4, 2)
        for _ in range(tensor_cnt):
            values = np.random.randint(-6, 6, shape).astype(np.int8)
            tensor = Tensor(Dtype.I8, values.shape, values.flatten())
            sequence.append(tensor)

        name = "sequence_construct_i8"
        make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)

    @staticmethod
    def sequence_construct_fp8x23():
        sequence = []
        tensor_cnt = np.random.randint(1, 10)
        shape = np.random.randint(1, 4, 2)
        for _ in range(tensor_cnt):
            values = np.random.randint(-6, 6, shape).astype(np.float64)
            tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
            sequence.append(tensor)

        name = "sequence_construct_fp8x23"
        make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)

    @staticmethod
    def sequence_construct_fp16x16():
        sequence = []
        tensor_cnt = np.random.randint(1, 10)
        shape = np.random.randint(1, 4, 2)
        for _ in range(tensor_cnt):
            values = np.random.randint(-6, 6, shape).astype(np.float64)
            tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
            sequence.append(tensor)

        name = "sequence_construct_fp16x16"
        make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
https://github.com/gizatechxyz/orion
nodegen/node/sequence_empty.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, Dtype, Tensor, Trait


class Sequence_empty(RunAll):

    @staticmethod
    def sequence_empty_u32():
        def default():
            shape = (0,)
            x = np.zeros(shape, dtype=np.uint32)
            t = Tensor(Dtype.U32, shape, x.flatten())

            make_test(
                inputs=[],
                output=[t],
                func_sig="SequenceTrait::sequence_empty()",
                name="sequence_empty_u32",
                trait=Trait.SEQUENCE
            )

        default()

    @staticmethod
    def sequence_empty_i32():
        def default():
            shape = (0,)
            x = np.zeros(shape, dtype=np.int32)
            t = Tensor(Dtype.I32, shape, x.flatten())

            make_test(
                inputs=[],
                output=[t],
                func_sig="SequenceTrait::sequence_empty()",
                name="sequence_empty_i32",
                trait=Trait.SEQUENCE
            )

        default()

    @staticmethod
    def sequence_empty_i8():
        def default():
            shape = (0,)
            x = np.zeros(shape, dtype=np.int8)
            t = Tensor(Dtype.I8, shape, x.flatten())

            make_test(
                inputs=[],
                output=[t],
                func_sig="SequenceTrait::sequence_empty()",
                name="sequence_empty_i8",
                trait=Trait.SEQUENCE
            )

        default()

    @staticmethod
    def sequence_empty_fp8x23():
        def default():
            shape = (0,)
            x = np.zeros(shape, dtype=np.float64)
            t = Tensor(Dtype.FP8x23, shape, x.flatten())

            make_test(
                inputs=[],
                output=[t],
                func_sig="SequenceTrait::sequence_empty()",
                name="sequence_empty_fp8x23",
                trait=Trait.SEQUENCE
            )

        default()

    @staticmethod
    def sequence_empty_fp16x16():
        def default():
            shape = (0,)
            x = np.zeros(shape, dtype=np.float64)
            t = Tensor(Dtype.FP16x16, shape, x.flatten())

            make_test(
                inputs=[],
                output=[t],
                func_sig="SequenceTrait::sequence_empty()",
                name="sequence_empty_fp16x16",
                trait=Trait.SEQUENCE
            )

        default()
https://github.com/gizatechxyz/orion
nodegen/node/sequence_erase.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait

scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())


class Sequence_erase(RunAll):

    @staticmethod
    def sequence_erase_u32():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            output_sequence = sequence.copy()
            output_sequence.pop(2)

            name = "sequence_erase_u32_positive"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            output_sequence = sequence.copy()
            output_sequence.pop(-2)

            name = "sequence_erase_u32_negative"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def empty_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)

            output_sequence = sequence.copy()
            output_sequence.pop(-1)

            name = "sequence_erase_u32_empty"
            make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
        empty_position()

    @staticmethod
    def sequence_erase_i32():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            output_sequence = sequence.copy()
            output_sequence.pop(2)

            name = "sequence_erase_i32_positive"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            output_sequence = sequence.copy()
            output_sequence.pop(-2)

            name = "sequence_erase_i32_negative"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def empty_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)

            output_sequence = sequence.copy()
            output_sequence.pop(-1)

            name = "sequence_erase_i32_empty"
            make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
        empty_position()

    @staticmethod
    def sequence_erase_i8():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(2)

            output_sequence = sequence.copy()
            output_sequence.pop(2)

            name = "sequence_erase_i8_positive"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)
            position = scalar(-2)

            output_sequence = sequence.copy()
            output_sequence.pop(-2)

            name = "sequence_erase_i8_negative"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def empty_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)

            output_sequence = sequence.copy()
            output_sequence.pop(-1)

            name = "sequence_erase_i8_empty"
            make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
        empty_position()

    @staticmethod
    def sequence_erase_fp8x23():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)
            position = scalar(2)

            output_sequence = sequence.copy()
            output_sequence.pop(2)

            name = "sequence_erase_fp8x23_positive"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)
            position = scalar(-2)

            output_sequence = sequence.copy()
            output_sequence.pop(-2)

            name = "sequence_erase_fp8x23_negative"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def empty_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)

            output_sequence = sequence.copy()
            output_sequence.pop(-1)

            name = "sequence_erase_fp8x23_empty"
            make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
        empty_position()

    @staticmethod
    def sequence_erase_fp16x16():
        def positive_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)
            position = scalar(2)

            output_sequence = sequence.copy()
            output_sequence.pop(2)

            name = "sequence_erase_fp16x16_positive"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def negative_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)
            position = scalar(-2)

            output_sequence = sequence.copy()
            output_sequence.pop(-2)

            name = "sequence_erase_fp16x16_negative"
            make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)

        def empty_position():
            sequence = []
            shape = np.random.randint(1, 4, 2)
            for _ in range(5):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)

            output_sequence = sequence.copy()
            output_sequence.pop(-1)

            name = "sequence_erase_fp16x16_empty"
            make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)

        positive_position()
        negative_position()
        empty_position()
https://github.com/gizatechxyz/orion
nodegen/node/sequence_insert.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait

scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())


class Sequence_insert(RunAll):

    @staticmethod
    def sequence_insert_u32():
        def default():
            sequence = []
            tensor_cnt = 3
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                val = np.random.randint(0, 6, shape).astype(np.uint32)
                t = Tensor(Dtype.U32, val.shape, val.flatten())
                sequence.append(t)

            val = np.random.randint(0, 6, shape).astype(np.uint32)
            tensor = Tensor(Dtype.U32, val.shape, val.flatten())
            position = np.random.randint(-2, 2)

            expected_sequence = sequence.copy()
            expected_sequence.insert(position, tensor)

            name = "sequence_insert_u32"
            make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)

        default()

    @staticmethod
    def sequence_insert_i32():
        def default():
            sequence = []
            tensor_cnt = 3
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                val = np.random.randint(0, 6, shape).astype(np.int32)
                t = Tensor(Dtype.I32, val.shape, val.flatten())
                sequence.append(t)

            val = np.random.randint(0, 6, shape).astype(np.int32)
            tensor = Tensor(Dtype.I32, val.shape, val.flatten())
            position = np.random.randint(-2, 2)

            expected_sequence = sequence.copy()
            expected_sequence.insert(position, tensor)

            name = "sequence_insert_i32"
            make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)

        default()

    @staticmethod
    def sequence_insert_i8():
        def default():
            sequence = []
            tensor_cnt = 3
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                val = np.random.randint(0, 6, shape).astype(np.int8)
                t = Tensor(Dtype.I8, val.shape, val.flatten())
                sequence.append(t)

            val = np.random.randint(0, 6, shape).astype(np.int8)
            tensor = Tensor(Dtype.I8, val.shape, val.flatten())
            position = np.random.randint(-2, 2)

            expected_sequence = sequence.copy()
            expected_sequence.insert(position, tensor)

            name = "sequence_insert_i8"
            make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)

        default()

    @staticmethod
    def sequence_insert_fp8x23():
        def default():
            sequence = []
            tensor_cnt = 3
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                val = np.random.randint(0, 6, shape).astype(np.float64)
                t = Tensor(Dtype.FP8x23, val.shape, to_fp(val.flatten(), FixedImpl.FP8x23))
                sequence.append(t)

            val = np.random.randint(0, 6, shape).astype(np.float64)
            tensor = Tensor(Dtype.FP8x23, val.shape, to_fp(val.flatten(), FixedImpl.FP8x23))
            position = np.random.randint(-2, 2)

            expected_sequence = sequence.copy()
            expected_sequence.insert(position, tensor)

            name = "sequence_insert_fp8x23"
            make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)

        default()

    @staticmethod
    def sequence_insert_fp16x16():
        def default():
            sequence = []
            tensor_cnt = 3
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                val = np.random.randint(0, 6, shape).astype(np.float64)
                t = Tensor(Dtype.FP16x16, val.shape, to_fp(val.flatten(), FixedImpl.FP16x16))
                sequence.append(t)

            val = np.random.randint(0, 6, shape).astype(np.float64)
            tensor = Tensor(Dtype.FP16x16, val.shape, to_fp(val.flatten(), FixedImpl.FP16x16))
            position = np.random.randint(-2, 2)

            expected_sequence = sequence.copy()
            expected_sequence.insert(position, tensor)

            name = "sequence_insert_fp16x16"
            make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)

        default()
https://github.com/gizatechxyz/orion
nodegen/node/sequence_length.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait

scalar = lambda x: Tensor(Dtype.U32, (), np.array([x]).astype(np.uint32).flatten())


class Sequence_length(RunAll):

    @staticmethod
    def sequence_length_u32():
        def default():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_u32"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        def broadcast():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            for _ in range(tensor_cnt):
                shape = np.random.randint(1, 4, 2)
                values = np.random.randint(0, 6, shape).astype(np.uint32)
                tensor = Tensor(Dtype.U32, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_u32_broadcast"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        default()
        broadcast()

    @staticmethod
    def sequence_length_i32():
        def default():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_i32"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        def broadcast():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            for _ in range(tensor_cnt):
                shape = np.random.randint(1, 4, 2)
                values = np.random.randint(-6, 6, shape).astype(np.int32)
                tensor = Tensor(Dtype.I32, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_i32_broadcast"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        default()
        broadcast()

    @staticmethod
    def sequence_length_i8():
        def default():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_i8"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        def broadcast():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            for _ in range(tensor_cnt):
                shape = np.random.randint(1, 4, 2)
                values = np.random.randint(-6, 6, shape).astype(np.int8)
                tensor = Tensor(Dtype.I8, values.shape, values.flatten())
                sequence.append(tensor)

            name = "sequence_length_i8_broadcast"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        default()
        broadcast()

    @staticmethod
    def sequence_length_fp8x23():
        def default():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)

            name = "sequence_length_fp8x23"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        def broadcast():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            for _ in range(tensor_cnt):
                shape = np.random.randint(1, 4, 2)
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
                sequence.append(tensor)

            name = "sequence_length_fp8x23_broadcast"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        default()
        broadcast()

    @staticmethod
    def sequence_length_fp16x16():
        def default():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            shape = np.random.randint(1, 4, 2)
            for _ in range(tensor_cnt):
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)

            name = "sequence_length_fp16x16"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        def broadcast():
            sequence = []
            tensor_cnt = np.random.randint(1, 10)
            for _ in range(tensor_cnt):
                shape = np.random.randint(1, 4, 2)
                values = np.random.randint(-6, 6, shape).astype(np.float64)
                tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
                sequence.append(tensor)

            name = "sequence_length_fp16x16_broadcast"
            make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)

        default()
        broadcast()
https://github.com/gizatechxyz/orion
nodegen/node/shrink.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


def shrink(input_array: np.ndarray, bias: float, lambd: float) -> np.ndarray:
    output_array = np.where(input_array > lambd, input_array - bias,
                            np.where(input_array < -lambd, input_array + bias, 0))
    return output_array


class Shrink(RunAll):

    @staticmethod
    def shrink_fp8x23():
        def shrink_hard():
            x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
            bias = np.float64(0)  # Default value
            lambd = np.float64(1)
            y = shrink(x, bias, lambd)

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

            name = "shrink_hard_fp8x23"
            make_test([x], y, "TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)))", name)

        def shrink_soft():
            x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
            bias = np.float64(1)
            lambd = np.float64(1)
            y = shrink(x, bias, lambd)

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

            name = "shrink_soft_fp8x23"
            make_test([x], y, "TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false)))", name)

        shrink_hard()
        shrink_soft()

    @staticmethod
    def shrink_fp16x16():
        def shrink_hard():
            x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
            bias = np.float64(0)  # Default value
            lambd = np.float64(1)
            y = shrink(x, bias, lambd)

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

            name = "shrink_hard_fp16x16"
            make_test([x], y, "TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)))", name)

        def shrink_soft():
            x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
            bias = np.float64(1)
            lambd = np.float64(1)
            y = shrink(x, bias, lambd)

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

            name = "shrink_soft_fp16x16"
            make_test([x], y, "TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false)))", name)

        shrink_hard()
        shrink_soft()
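The raw constants passed to FixedTrait::new in the func_sig strings encode the float parameters used on the NumPy side: the fixed-point words appear to be the value scaled by 2^23 (FP8x23) or 2^16 (FP16x16), consistent with the to_fp conversions used throughout these tests.

# lambd = bias = 1.0 from the NumPy reference, as raw fixed-point words:
assert int(1.0 * 2**23) == 8388608   # FixedTrait::new(8388608, false) in FP8x23
assert int(1.0 * 2**16) == 65536     # FixedTrait::new(65536, false) in FP16x16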
https://github.com/gizatechxyz/orion
nodegen/node/sigmoid.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import tensorflow as tf


class Sigmoid(RunAll):

    @staticmethod
    def fp8x23():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
        y = tf.keras.activations.sigmoid(x).numpy()

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "sigmoid_fp8x23"
        make_test([x], y, "NNTrait::sigmoid(@input_0)", name, Trait.NN)

    @staticmethod
    def fp16x16():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
        y = tf.keras.activations.sigmoid(x).numpy()

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "sigmoid_fp16x16"
        make_test([x], y, "NNTrait::sigmoid(@input_0)", name, Trait.NN)
https://github.com/gizatechxyz/orion
nodegen/node/sign.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Sign(RunAll):

    @staticmethod
    def sign_i8():
        def sign():
            x = np.array(range(-5, 6)).astype(np.int8)
            y = np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int8)

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())

            name = "sign_i8"
            make_test([x], y, "input_0.sign()", name)

        sign()

    @staticmethod
    def sign_i32():
        def sign():
            x = np.array(range(-5, 6)).astype(np.int32)
            y = np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int32)

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())

            name = "sign_i32"
            make_test([x], y, "input_0.sign()", name)

        sign()

    @staticmethod
    def sign_fail():
        def sign():
            x = np.array(range(-5, 6)).astype(np.int32)
            y = np.array([1, -1, -1, -1, -1, 0, 1, 1, 1, 1, -1]).astype(np.int32)

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())

            name = "sign_fail"
            make_test([x], y, "input_0.sign()", name)

        sign()

    @staticmethod
    def sign_fP16x16():
        def sign():
            x = to_fp(np.array(range(-5, 6)).astype(np.int64), FixedImpl.FP16x16)
            y = to_fp(np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int64), FixedImpl.FP16x16)

            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())

            name = "sign_fP16x16"
            make_test([x], y, "input_0.sign()", name)

        sign()

    @staticmethod
    def sign_fP8x23():
        def sign():
            x = to_fp(np.array(range(-5, 6)).astype(np.int64), FixedImpl.FP8x23)
            y = to_fp(np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int64), FixedImpl.FP8x23)

            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())

            name = "sign_fP8x23"
            make_test([x], y, "input_0.sign()", name)

        sign()
https://github.com/gizatechxyz/orion
nodegen/node/sin.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Sin(RunAll):

    @staticmethod
    def sin_fp8x23():
        x = np.random.uniform(-3, 7, (2, 2)).astype(np.float64)
        y = np.sin(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "sin_fp8x23"
        make_test([x], y, "input_0.sin()", name)

    @staticmethod
    def sin_fp16x16():
        x = np.random.uniform(-3, 7, (2, 2)).astype(np.float64)
        y = np.sin(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "sin_fp16x16"
        make_test([x], y, "input_0.sin()", name)
https://github.com/gizatechxyz/orion
nodegen/node/sinh.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Sinh(RunAll):

    @staticmethod
    def sinh_fp8x23():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.sinh(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "sinh_fp8x23"
        make_test([x], y, "input_0.sinh()", name)

    @staticmethod
    def sinh_fp16x16():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = np.sinh(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "sinh_fp16x16"
        make_test([x], y, "input_0.sinh()", name)
https://github.com/gizatechxyz/orion
nodegen/node/slice.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Slice(RunAll):

    @staticmethod
    def slice_u32():
        def slice_2D():
            x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
            y = x[0:2, 2:4]

            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())

            name = "slice_u32_2d"
            make_test([x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)

        def slice_3D():
            x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
            y = x[0:3, 0:10:3]

            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())

            name = "slice_u32_3d"
            make_test([x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)

        slice_2D()
        slice_3D()

    @staticmethod
    def slice_i32():
        def slice_2D():
            x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
            y = x[0:2, 2:4]

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())

            name = "slice_i32_2d"
            make_test([x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)

        def slice_3D():
            x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
            y = x[0:3, 0:10:3]

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())

            name = "slice_i32_3d"
            make_test([x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)

        slice_2D()
        slice_3D()

    @staticmethod
    def slice_i8():
        def slice_2D():
            x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
            y = x[0:2, 2:4]

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())

            name = "slice_i8_2d"
            make_test([x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)

        def slice_3D():
            x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
            y = x[0:3, 0:10:3]

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())

            name = "slice_i8_3d"
            make_test([x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)

        slice_2D()
        slice_3D()

    @staticmethod
    def slice_fp8x23():
        def slice_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 4)).astype(np.int64), FixedImpl.FP8x23)
            y = x[0:2, 2:4]

            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())

            name = "slice_fp8x23_2d"
            make_test([x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)

        def slice_3D():
            x = to_fp(np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64), FixedImpl.FP8x23)
            y = x[0:3, 0:10:3]

            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())

            name = "slice_fp8x23_3d"
            make_test([x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)

        slice_2D()
        slice_3D()

    @staticmethod
    def slice_fp16x16():
        def slice_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 4)).astype(np.int64), FixedImpl.FP16x16)
            y = x[0:2, 2:4]

            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())

            name = "slice_fp16x16_2d"
            make_test([x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)

        def slice_3D():
            x = to_fp(np.random.randint(-127, 127, (20, 10, 5)).astype(np.int64), FixedImpl.FP16x16)
            y = x[0:3, 0:10:3]

            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())

            name = "slice_fp16x16_3d"
            make_test([x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)

        slice_2D()
        slice_3D()
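For the 3D cases, the ONNX-style arguments map directly onto the NumPy slice used to build the expected output: starts [0, 0], ends [3, 10], axes [0, 1], steps [1, 3] corresponds to x[0:3, 0:10:3]. A quick shape check:

import numpy as np

x = np.random.randint(0, 255, (20, 10, 5))
# step 3 over indices 0..9 keeps 0, 3, 6, 9 -> 4 elements on axis 1
assert x[0:3, 0:10:3].shape == (3, 4, 5)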
https://github.com/gizatechxyz/orion
nodegen/node/softmax.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    x_max = np.max(x, axis=axis, keepdims=True)
    tmp = np.exp(x - x_max)
    s = np.sum(tmp, axis=axis, keepdims=True)
    return tmp / s


class Softmax(RunAll):

    @staticmethod
    def axis_0():
        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        y = softmax(x, axis=0)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "softmax_axis_0"
        make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(0))", name, Trait.NN)

    @staticmethod
    def axis_1():
        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        y = softmax(x, axis=1)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "softmax_axis_1"
        make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(1))", name, Trait.NN)

    @staticmethod
    def axis_2():
        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        y = softmax(x, axis=2)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "softmax_axis_2"
        make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(2))", name, Trait.NN)

    @staticmethod
    def axis_minus_1():
        x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
        y = softmax(x, axis=-1)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "softmax_axis_minus_1"
        make_test([x], y, "NNTrait::softmax(@input_0, Option::None)", name, Trait.NN)
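Subtracting the per-axis max before exponentiating is the usual overflow guard; it cancels in the ratio, so the stable form matches the naive one and every slice along the reduced axis sums to 1. A sanity check reusing the softmax helper defined above:

x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
naive = np.exp(x) / np.sum(np.exp(x), axis=0, keepdims=True)
assert np.allclose(softmax(x, axis=0), naive, atol=1e-6)
assert np.allclose(softmax(x, axis=0).sum(axis=0), 1.0, atol=1e-5)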
https://github.com/gizatechxyz/orion
nodegen/node/softmax_zero.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def softmax_zero(x: np.ndarray, axis: int = -1) -> np.ndarray:
    x_max = np.max(x, axis=axis, keepdims=True)
    tmp = np.exp(x - x_max)
    tmp = np.where(x == 0.0, 0.0, tmp)
    s = np.sum(tmp, axis=axis, keepdims=True)
    s = np.where(s == 0.0, 1, s)
    return tmp / s


class Softmax_zero(RunAll):

    @staticmethod
    def fp8x23():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = softmax_zero(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "softmax_zero_fp8x23"
        make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)", name, Trait.NN)

    @staticmethod
    def fp16x16():
        x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
        y = softmax_zero(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "softmax_zero_fp16x16"
        make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)", name, Trait.NN)
https://github.com/gizatechxyz/orion
nodegen/node/softplus.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def softplus(x: np.ndarray) -> np.ndarray:
    return np.log(np.exp(x) + 1)


class Softplus(RunAll):

    @staticmethod
    def softplus_fp():
        def fp8x23():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = softplus(x)

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

            name = "softplus_fp8x23"
            make_test([x], y, "NNTrait::softplus(@input_0)", name, Trait.NN)

        def fp16x16():
            x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
            y = softplus(x)

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

            name = "softplus_fp16x16"
            make_test([x], y, "NNTrait::softplus(@input_0)", name, Trait.NN)

        fp8x23()
        fp16x16()
https://github.com/gizatechxyz/orion
nodegen/node/softsign.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def softsign(x: np.ndarray) -> np.ndarray:
    return x / (1 + np.abs(x))


class Softsign(RunAll):

    @staticmethod
    def softsign_fp():
        def fp8x23():
            x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
            y = softsign(x)

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

            name = "softsign_fp8x23"
            make_test([x], y, "NNTrait::softsign(@input_0)", name, Trait.NN)

        def fp16x16():
            x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
            y = softsign(x)

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

            name = "softsign_fp16x16"
            make_test([x], y, "NNTrait::softsign(@input_0)", name, Trait.NN)

        fp8x23()
        fp16x16()
https://github.com/gizatechxyz/orion
nodegen/node/space_to_depth.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait


def space_to_depth(data: np.ndarray, blocksize: int = 2) -> np.ndarray:
    if len(data.shape) != 4:
        raise RuntimeError(f"Unexpected shape {data.shape!r}.")
    b, C, H, W = data.shape
    tmpshape = (
        b,
        C,
        H // blocksize,
        blocksize,
        W // blocksize,
        blocksize,
    )
    reshaped = np.reshape(data, tmpshape)
    transposed = np.transpose(reshaped, [0, 3, 5, 1, 2, 4])
    finalshape = (
        b,
        C * blocksize * blocksize,
        H // blocksize,
        W // blocksize,
    )
    y = np.reshape(transposed, finalshape).astype(data.dtype)
    return y


class Space_to_depth(RunAll):

    @staticmethod
    def fp8x23():
        x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64)
        y = space_to_depth(x)

        x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
        y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))

        name = "space_to_depth_fp8x23"
        make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)

    @staticmethod
    def fp16x16():
        x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float16)
        y = space_to_depth(x)

        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
        y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))

        name = "space_to_depth_fp16x16"
        make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)

    # @staticmethod
    # def fp64x64():
    #     x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64)
    #     y = space_to_depth(x)
    #
    #     x = Tensor(Dtype.FP64x64, x.shape, to_fp(x.flatten(), FixedImpl.FP64x64))
    #     y = Tensor(Dtype.FP64x64, y.shape, to_fp(y.flatten(), FixedImpl.FP64x64))
    #
    #     name = "space_to_depth_fp64x64"
    #     make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)

    @staticmethod
    def fpi8():
        x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int8)
        y = space_to_depth(x)

        x = Tensor(Dtype.I8, x.shape, x.flatten())
        y = Tensor(Dtype.I8, y.shape, y.flatten())

        name = "space_to_depth_i8"
        make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)

    @staticmethod
    def fpi32():
        x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int32)
        y = space_to_depth(x)

        x = Tensor(Dtype.I32, x.shape, x.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "space_to_depth_i32"
        make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)

    @staticmethod
    def fpu32():
        x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.uint32)
        y = space_to_depth(x)

        x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())

        name = "space_to_depth_u32"
        make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", name, Trait.NN)
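A worked shape example for the reshape/transpose trick above, reusing the space_to_depth helper defined in this file: with blocksize=2, each 2x2 spatial block moves into the channel dimension.

x = np.arange(16).reshape(1, 2, 2, 4)   # (N, C, H, W)
y = space_to_depth(x, blocksize=2)
assert y.shape == (1, 8, 1, 2)          # (N, C*2*2, H//2, W//2)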
https://github.com/gizatechxyz/orion
nodegen/node/split.py
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Split(RunAll):

    @staticmethod
    def split_u32():
        def split_1D():
            x = np.random.randint(0, 255, 6).astype(np.uint32)
            y = [
                np.array(x[0:2]).astype(np.uint32),
                np.array(x[2:4]).astype(np.uint32),
                np.array(x[4:6]).astype(np.uint32),
            ]

            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
                Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
            ]

            name = "split_u32_1d_equal_parts"
            make_test([_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)

            y = [
                np.array(x[0:2]).astype(np.uint32),
                np.array(x[2:6]).astype(np.uint32),
            ]
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
            ]

            name = "split_u32_1d_variable_parts"
            make_test([_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)

        def split_2D():
            x = np.random.randint(0, 255, (2, 6)).astype(np.uint32)
            y = [
                np.array(x[0:2, 0:3]).astype(np.uint32),
                np.array(x[0:2, 3:6]).astype(np.uint32),
            ]

            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
            ]

            name = "split_u32_2d_equal_parts"
            make_test([_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)

            y = [
                np.array(x[0:2, 0:2]).astype(np.uint32),
                np.array(x[0:2, 2:6]).astype(np.uint32)
            ]
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
            ]

            name = "split_u32_2d_variable_parts"
            make_test([_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)

        def split_zero_size():
            # 1-dimensional tensor with dimension_size=0
            x = np.array([]).astype(np.uint32)
            y = [
                np.array([]).astype(np.uint32),
                np.array([]).astype(np.uint32),
                np.array([]).astype(np.uint32),
            ]

            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
                Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
            ]

            # Split an empty tensor into tensors of size zero
            name = "split_u32_zero_size"
            make_test([_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)

        def split_1d_uneven():
            x = np.random.randint(0, 255, 7).astype(np.uint32)
            y = [
                np.array(x[0:2]).astype(np.uint32),
                np.array(x[2:4]).astype(np.uint32),
                np.array(x[4:6]).astype(np.uint32),
                np.array(x[6:7]).astype(np.uint32),
            ]

            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
                Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
                Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
            ]

            name = "split_u32_1d_uneven"
            make_test([_x], _y, "input_0.split(0, Option::Some(4), Option::None(()))", name)

        def split_2d_uneven():
            x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
            y = [
                np.array(x[0:2, 0:3]).astype(np.uint32),
                np.array(x[0:2, 3:6]).astype(np.uint32),
                np.array(x[0:2, 6:8]).astype(np.uint32)
            ]

            _x = Tensor(Dtype.U32, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
                Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
            ]

            name = "split_u32_2d_uneven"
            make_test([_x], _y, "input_0.split(1, Option::Some(3), Option::None(()))", name)

        split_1D()
        split_2D()
        split_zero_size()
        split_1d_uneven()
        split_2d_uneven()

    @staticmethod
    def split_fp16x16():
        def split_1D():
            x = to_fp(np.random.randint(-127, 127, 6).astype(np.int64), FixedImpl.FP16x16)
            y = [
                np.array(x[0:2]).astype(np.int64),
                np.array(x[2:4]).astype(np.int64),
                np.array(x[4:6]).astype(np.int64),
            ]

            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
                Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
            ]

            name = "split_fp16x16_1d_equal_parts"
            make_test([_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)

            y = [
                np.array(x[0:2]).astype(np.int64),
                np.array(x[2:6]).astype(np.int64),
            ]
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
            ]

            name = "split_fp16x16_1d_variable_parts"
            make_test([_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)

        def split_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 6)).astype(np.int64), FixedImpl.FP16x16)
            y = [
                np.array(x[0:2, 0:3]).astype(np.int64),
                np.array(x[0:2, 3:6]).astype(np.int64),
            ]

            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
            ]

            name = "split_fp16x16_2d_equal_parts"
            make_test([_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)

            y = [
                np.array(x[0:2, 0:2]).astype(np.int64),
                np.array(x[0:2, 2:6]).astype(np.int64)
            ]
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
            ]

            name = "split_fp16x16_2d_variable_parts"
            make_test([_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)

        def split_zero_size():
            # 1-dimensional tensor with dimension_size=0
            x = to_fp(np.array([]).astype(np.int64), FixedImpl.FP16x16)
            y = [
                np.array([]).astype(np.int64),
                np.array([]).astype(np.int64),
                np.array([]).astype(np.int64),
            ]

            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
                Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
            ]

            # Split an empty tensor into tensors of size zero
            name = "split_fp16x16_zero_size"
            make_test([_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)

        def split_1d_uneven():
            x = to_fp(np.random.randint(-127, 127, 7).astype(np.int64), FixedImpl.FP16x16)
            y = [
                np.array(x[0:2]).astype(np.int64),
                np.array(x[2:4]).astype(np.int64),
                np.array(x[4:6]).astype(np.int64),
                np.array(x[6:7]).astype(np.int64),
            ]

            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
                Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
                Tensor(Dtype.FP16x16, y[3].shape, y[3].flatten()),
            ]

            name = "split_fp16x16_1d_uneven"
            make_test([_x], _y, "input_0.split(0, Option::Some(4), Option::None(()))", name)

        def split_2d_uneven():
            x = to_fp(np.random.randint(-127, 127, (2, 8)).astype(np.int64), FixedImpl.FP16x16)
            y = [
                np.array(x[0:2, 0:3]).astype(np.int64),
                np.array(x[0:2, 3:6]).astype(np.int64),
                np.array(x[0:2, 6:8]).astype(np.int64)
            ]

            _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            _y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
                Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
            ]

            name = "split_fp16x16_2d_uneven"
            make_test([_x], _y, "input_0.split(1, Option::Some(3), Option::None(()))", name)

        split_1D()
        split_2D()
        split_zero_size()
        split_1d_uneven()
        split_2d_uneven()
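The uneven cases above match NumPy's np.array_split convention (earlier chunks absorb the remainder), while the variable-parts cases correspond to splitting at explicit sizes; a small cross-check, assuming only standard NumPy:

import numpy as np

x = np.arange(7)
parts = np.array_split(x, 4)        # sizes [2, 2, 2, 1], cf. split_u32_1d_uneven
assert [len(p) for p in parts] == [2, 2, 2, 1]

v = np.split(np.arange(6), [2])     # explicit sizes [2, 4], cf. the variable-parts cases
assert [len(p) for p in v] == [2, 4]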
https://github.com/gizatechxyz/orion