def axis_1():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=1)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_3d_three_tensors_axis_1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_3d_three_tensors_axis_2"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)",
name= name, trait= Trait.TENSOR)
default()
axis_1()
axis_2()
three_tensors_axis_1()
three_tensors_axis_2()
concat_1D()
concat_2D()
concat_3D()
@staticmethod
def concat_i32():
def concat_1D():
x1 = np.arange(0,3).astype(np.int32)
x2 = np.arange(3,6).astype(np.int32)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_1d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_2D():
x1 = np.arange(0,4).astype(np.int32).reshape(2,2)
x2 = np.arange(4,8).astype(np.int32).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
def default():
x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def axis_1():
x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=2)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int32).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=1)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.I32, x3.shape, x3.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_3d_three_tensors_axis_1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int32).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int32).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.I32, x3.shape, x3.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "concat_i32_3d_three_tensors_axis_2"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)",
name= name, trait= Trait.TENSOR)
default()
axis_1()
axis_2()
three_tensors_axis_1()
three_tensors_axis_2()
concat_1D()
concat_2D()
concat_3D()
@staticmethod
def concat_i8():
def concat_1D():
x1 = np.arange(0,3).astype(np.int8)
x2 = np.arange(3,6).astype(np.int8)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_1d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_2D():
x1 = np.arange(0,4).astype(np.int8).reshape(2,2)
x2 = np.arange(4,8).astype(np.int8).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
def default():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def axis_1():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=2)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=1)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
x3 = Tensor(Dtype.I8, x3.shape, x3.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_3d_three_tensors_axis_1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int8).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int8).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int8).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
x3 = Tensor(Dtype.I8, x3.shape, x3.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "concat_i8_3d_three_tensors_axis_2"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)",
name= name, trait= Trait.TENSOR)
default()
axis_1()
axis_2()
three_tensors_axis_1()
three_tensors_axis_2()
concat_1D()
concat_2D()
concat_3D()
@staticmethod
def concat_fp8x23():
def concat_1D():
x1 = np.arange(0,3).astype(np.int64)
x2 = np.arange(3,6).astype(np.int64)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_1d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_2D():
x1 = np.arange(0,4).astype(np.int64).reshape(2,2)
x2 = np.arange(4,8).astype(np.int64).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
def default():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def axis_1():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_ |
fp8x23_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=2)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=1)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(
x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_3d_three_tensors_axis_1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.FP8x23, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP8x23))
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(
x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "concat_fp8x23_3d_three_tensors_axis_2"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)",
name= name, trait= Trait.TENSOR)
default()
axis_1()
axis_2()
three_tensors_axis_1()
three_tensors_axis_2()
concat_1D()
concat_2D()
concat_3D()
@staticmethod
def concat_fp16x16():
def concat_1D():
x1 = np.arange(0,3).astype(np.int64)
x2 = np.arange(3,6).astype(np.int64)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_1d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_2D():
x1 = np.arange(0,4).astype(np.int64).reshape(2,2)
x2 = np.arange(4,8).astype(np.int64).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
def default():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def axis_1():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_axis_1"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 1)",
name= name, trait= Trait.TENSOR)
def axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_axis_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 2)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_1():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(
x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_three_tensors_axis_1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 1)",
name= name, trait= Trait.TENSOR)
def three_tensors_axis_2():
x1 = np.arange(0,27).astype(np.int64).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.int64).reshape(3,3,3)
x3 = np.arange(54,81).astype(np.int64).reshape(3,3,3)
y = np.concatenate((x1, x2, x3), axis=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(
x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.FP16x16, x2.shape, to_fp(
x2.flatten(), FixedImpl.FP16x16))
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(
x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "concat_fp16x16_3d_three_tensors_axis_2"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1, input_2].span(), 2)",
name= name, trait= Trait.TENSOR)
default()
axis_1()
axis_2()
three_tensors_axis_1()
three_tensors_axis_2()
concat_1D()
concat_2D()
concat_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
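# Each generator below draws a random sequence of five tensors with numpy,
# computes the expected result with np.concatenate or np.stack, and emits a
# Cairo test case for SequenceTrait::concat_from_sequence via make_test.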
class Concat_from_sequence(RunAll):
@staticmethod
def concat_from_sequence_u32():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
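# Note on the new_axis argument: Option::Some(0) keeps the rank unchanged, so
# the numpy reference is np.concatenate along `axis`; Option::Some(1) inserts
# a new dimension at `axis`, matching np.stack (new_axis_one below); and
# Option::None defaults to the concatenate behaviour (new_axis_default below).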
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_default"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
new_axis_default()
@staticmethod
def concat_from_sequence_i32():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_default"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
new_axis_default()
@staticmethod
def concat_from_sequence_i8():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_default"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
new_axis_default()
@staticmethod
def concat_from_sequence_fp8x23():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_default"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
new_axis_default()
@staticmethod
def concat_from_sequence_fp16x16():
def new_axis_zero():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_zero"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(1)
concatenated_tensor = np.stack(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_one"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
values_array = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
values_array.append(values)
axis = np.int32(1)
new_axis = np.uint32(0)
concatenated_tensor = np.concatenate(values_array, axis)
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_default"
make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, 1_i32, Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
new_axis_default()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def r_index_check(r_index, shape_out):
for i in range(len(r_index)):
if r_index[i] >= shape_out[i]:
return False
return True
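# r_index_check reports whether the multi-dimensional output index r_index
# still lies inside shape_out; the generic n-d convolution below uses it to
# skip window positions that fall outside the output tensor.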
def stride(arr):
stride = np.zeros(len(arr))
acc = 1
for i in range(len(arr)):
stride[i] = acc
acc *= arr[-(i + 1)]
return np.flip(stride)
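# stride() returns the row-major (C-order) element strides of a shape, e.g.
# stride([2, 3, 4]) -> [12., 4., 1.]: one step along axis 0 skips 3 * 4 = 12
# elements of the flattened array.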
def conv(
X,
W,
B=None,
auto_pad=None,
dilations=None,
group=None,
kernel_shape=None,
pads=None,
strides=None,
):
if dilations is None:
dilations = [1 for s in X.shape[2:]]
if kernel_shape is None:
kernel_shape = W.shape[2:]
if pads is None:
pads = [0 for s in X.shape[2:]] * 2
if strides is None:
strides = [1 for s in X.shape[2:]]
if X.shape[1] != W.shape[1] * group or W.shape[0] % group != 0:
raise ValueError(
f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}, "
f"W should be {(W.shape[0], X.shape[1]
)
if group > 1:
res = []
td = 0
mg = W.shape[0]
dw = W.shape[1]
for b in range(X.shape[0]):
for g in range(group):
gx = X[b : b + 1, g * dw : (g + 1) * dw]
gw = W[g * mg : (g + 1) * mg]
try:
cv = conv(
gx,
gw,
None,
auto_pad,
dilations,
1,
kernel_shape,
pads,
strides,
)
except (ValueError, RuntimeError) as e:
raise ValueError(
f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={g}/{group}, "
f"gx.shape={gx.shape}, gw.shape={gw.shape}, auto_pad={auto_pad}, "
f"dilations={dilations}, kernel_shape={ |
kernel_shape}, pads={pads}, "
f"strides={strides}."
) from e
if b == 0:
td += cv.shape[1]
res.append((b, cv))
new_shape = [X.shape[0], *list(res[0][1].shape[1:])]
new_shape[1] = td
final = np.zeros(tuple(new_shape), dtype=res[0][1].dtype)
p = 0
for b, cv in res:
final[b : b + 1, p : p + cv.shape[1]] = cv
p += cv.shape[1]
if p >= final.shape[1]:
p = 0
if B is not None:
new_shape = [1 for s in final.shape]
new_shape[1] = B.shape[0]
b = B.reshape(tuple(new_shape))
final += b
return final
if dilations[0] != 1 or min(dilations) != max(dilations):
nd = len(dilations)
new_kernel_shape = []
new_shape = list(W.shape[:-nd])
for i, d in enumerate(dilations):
di = len(W.shape) - nd + i
new_shape.append(W.shape[di] + (W.shape[di] - 1) * (d - 1))
new_kernel_shape.append(kernel_shape[i] + (kernel_shape[i] - 1) * (d - 1))
new_w = np.zeros(tuple(new_shape), dtype=W.dtype)
indices = [slice(0, new_w.shape[0]), slice(0, new_w.shape[1])]
for i, d in enumerate(dilations):
di = len(W.shape) - nd + i
indices.append(slice(0, new_w.shape[di], d))
new_w[tuple(indices)] = W
W = new_w
kernel_shape = new_kernel_shape
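# Dilation is handled by rewriting W as a zero-interleaved kernel: each spatial
# extent k becomes k + (k - 1) * (d - 1), so a 3-tap kernel with dilation 2
# becomes 5 taps (w, 0, w, 0, w), after which the convolution proceeds as if
# all dilations were 1.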
if auto_pad in {"SAME_LOWER", "SAME_UPPER", "VALID"}:
head = []
tail = []
for i in range(len(X.shape) - 2):
d = X.shape[i]
target_size = (d + strides[i] - 1) // strides[i]
pad_needed = (target_size - 1) * strides[i] + kernel_shape[i] - d
if auto_pad == "SAME_LOWER":
pad_head = (pad_needed + 1) // 2
else:
pad_head = pad_needed // 2
pad_tail = pad_needed - pad_head
head.append(pad_head)
tail.append(pad_tail)
pads = head + tail
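# The auto_pad branch picks explicit pads so that the output size per axis is
# ceil(d / stride): pad_needed = (target_size - 1) * stride + kernel - d is
# split between head and tail, with SAME_LOWER giving the extra unit (when
# pad_needed is odd) to the head and SAME_UPPER to the tail.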
if len(X.shape) == 3:
sN, sC, sH = X.shape
(kh,) = kernel_shape
(sth,) = strides
h_out = int(((sH - kh + pads[0] + pads[1]) / sth) + 1)
h0 = pads[0]
oh = -1 * (kh % 2)
bh = -h0
eh = h_out * sth
res = np.zeros((X.shape[0], W.shape[0], h_out))
if B is not None:
res[:, :, :] += B.reshape((1, -1, 1))
for n in range(0, sN):
for nw in range(W.shape[0]):
for c in range(0, sC):
w = W[nw : nw + 1, c : c + 1]
for io in range(bh, eh, sth):
hr = (io - bh) // sth
if hr >= h_out:
continue
i = io + kh % 2
ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH)
img = X[n : n + 1, c : c + 1, ih1:ih2]
if img.shape != w.shape:
jh1, jh2 = max(-oh - i, 0), min(kh, kh + sH - (i + oh + kh))
w_ = w[:1, :1, jh1:jh2]
if img.shape != w_.shape:
raise RuntimeError(
f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, "
f"i={i}, kh={kh}, sH={sH}, sth={sth}."
)
s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[
0, 0
]
else:
s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[
0, 0
]
res[n, nw, hr] += s
return res
if len(X.shape) == 4:
sN, sC, sH, sW = X.shape
kh, kw = kernel_shape
sth, stw = strides
h_out = int(((sH - kh + pads[0] + pads[2]) / sth) + 1)
w_out = int(((sW - kw + pads[1] + pads[3]) / stw) + 1)
h0, w0 = pads[0], pads[1]
oh, ow = -1 * (kh % 2), -1 * (kw % 2)
bh, bw = -h0, -w0
eh, ew = h_out * sth, w_out * stw
res = np.zeros((X.shape[0], W.shape[0], h_out, w_out))
if B is not None:
res[:, :, :, :] = B.reshape((1, -1, 1, 1))
for n in range(0, sN):
for nw in range(W.shape[0]):
for c in range(0, sC):
w = W[nw : nw + 1, c : c + 1]
for io in range(bh, eh, sth):
hr = (io - bh) // sth
if hr >= h_out:
continue
i = io + kh % 2
ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH)
for jo in range(bw, ew, stw):
wr = (jo - bw) // stw
if wr >= w_out:
continue
j = jo + kw % 2
iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW)
img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2]
if img.shape != w.shape:
jh1, jh2 = max(-oh - i, 0), min(
kh, kh + sH - (i + oh + kh)
)
jw1, jw2 = max(-ow - j, 0), min(
kw, kw + sW - (j + ow + kw)
)
w_ = w[:1, :1, jh1:jh2, jw1:jw2]
if img.shape != w_.shape:
raise RuntimeError(
f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, "
f"i={i}, j={j}, kh={kh}, kw={kw}, sH={sH}, sW={sW}, sth={sth}, stw={stw}."
)
s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[
0, 0
]
else:
s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[
0, 0
]
res[n, nw, hr, wr] += s
return res
if len(X.shape) == 5:
sN, sC, sH, sW, sZ = X.shape
kh, kw, kz = kernel_shape
sth, stw, stz = strides
h_out = int(((sH - kh + pads[0] + pads[3]) / sth) + 1)
w_out = int(((sW - kw + pads[1] + pads[4]) / stw) + 1)
z_out = int(((sZ - kz + pads[2] + pads[5]) / stz) + 1)
h0, w0, z0 = pads[0], pads[1], pads[2]
oh, ow, oz = -1 * (kh % 2), -1 * (kw % 2), -1 * (kz % 2)
bh, bw, bz = -h0, -w0, -z0
eh, ew, ez = h_out * sth, w_out * stw, z_out * stz
res = np.zeros((X.shape[0], W.shape[0], h_out, w_out, z_out))
if B is not None:
res[:, :, :, :, :] = B.reshape((1, -1, 1, 1, 1))
for n in range(0, sN):
for nw in range(W.shape[0]):
for c in range(0, sC):
w = W[nw : nw + 1, c : c + 1]
for io in range(bh, eh, sth):
hr = (io - bh) // sth
if hr >= h_out:
continue
i = io + kh % 2
ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH)
for jo in range(bw, ew, stw):
wr = (jo - bw) // stw
if wr >= w_out:
continue
j = jo + kw % 2
iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW)
for zo in range(bz, ez, stz):
zr = (zo - bz) // stz
if zr >= z_out:
continue
z = zo + kz % 2
iz1, iz2 = max(0, z + oz), min(z + oz + kz, sZ)
img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2, iz1:iz2]
if img.shape != w.shape:
jh1, jh2 = max(-oh - i, 0), min(
kh, kh + sH - (i + oh + kh)
)
jw1, jw2 = max(-ow - j, 0), min(
kw, kw + sW - (j + ow + kw)
)
jz1, jz2 = max(-oz - z, 0), min(
kz, kz + sZ - (z + oz + kz)
)
w_ = w[:1, :1, jh1:jh2, jw1:jw2, jz1:jz2]
if img.shape != w_.shape:
raise RuntimeError(
f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, oz={oz}, "
f"i={i}, j={j}, z={z}, kh={kh}, kw={kw}, kz={kz}, "
f"sH={sH}, sW={sW}, sZ={sZ}, sth={sth}, stw={stw}, stz={stz}."
)
s = np.dot(
img.reshape((1, -1)), w_.reshape((-1, 1))
)[
0, 0
]
else:
s = np.dot(
img.reshape((1, -1)), w.reshape((-1, 1))
)[
0, 0
]
res[n, nw, hr, wr, zr] += s
return res
else:
nd = len(X.shape[2:])
sN, sC = X.shape[:2]
x_stride = stride(X.shape)
w_stride = stride(W.shape)
x_flatten = X.reshape(int(x_stride[0] * X.shape[0]))
shape_out = [int(((X.shape[2+i] - kernel_shape[i] + pads[i] + pads[i + nd]) / strides[i]) + 1) for i in range(nd)]
o_index = [-1 * (kernel_shape[i] % 2) for i in range(nd)]
b_index = [-pads[i] for i in range(nd)]
e_index = [shape_out[i] * strides[i] for i in range(nd)]
range_len = [(e_index[i] - b_index[i]) / strides[i] for i in range(nd)]
range_stride = stride(range_len)
res_shape = [X.shape[0], W.shape[0]] + shape_out
res = np.zeros(res_shape)
res_strides = stride(res_shape)
if B is not None:
res += B.reshape([1, -1] + [1] * nd)
for n in range(0, sN):
for nw in range(W.shape[0]):
for c in range(0, sC):
w = W[nw : nw + 1, c : c + 1]
for i in range(int(range_len[0] * range_stride[0])):
flatten_index = i
io_index = np.zeros(nd)
r_index = np.zeros(nd)
for nx in range(nd):
n_index, rem = divmod(flatten_index, range_stride[nx])
flatten_index = rem
io_index[nx] = n_index * strides[nx] + b_index[nx]
r_index[nx] = n_index
if r_index_check(r_index, shape_out):
indices = [io_index[nx] + (kernel_shape[nx] % 2) for nx in range(nd)]
i1_index = [max(0, indices[nx] + o_index[nx]) for nx in range(nd)]
i2_index = [min(X.shape[2 + nx], indices[nx] + o_index[nx] + kernel_shape[nx]) for nx in range(nd)]
idiff_index = [int(i2_index[nx] - i1_index[nx]) for nx in range(nd - 1)]
i_stride = stride(idiff_index)
img = []
for ii in range(int(i_stride[0] * idiff_index[0])):
flatten_index = ii
start = n * x_stride[0] + c * x_stride[1]
for nx in range(nd - 1):
ii_index, rem = divmod(flatten_index, i_stride[nx])
flatten_index = rem
start += (i1_index[nx] + ii_index) * x_stride[2 + nx]
start += i1_index[nd-1]
end = start + (i2_index[nd-1] - i1_index[nd-1])
img.append(x_flatten[int(start):int(end)])
img_shape = [1, 1] + idiff_index
w = w.reshape(np.prod(kernel_shape))
if len(img) != len(w):
j1_index = [max(0, -indices[nx] - o_index[nx]) for nx in range(nd)]
j2_index = [min(X.shape[2 + nx] - indices[nx] - o_index[nx], kernel_shape[nx]) for nx in range(nd)]
jdiff_index = [j2_index[nx] - j1_index[nx] for nx in range(nd - 1)]
w_ = []
j_stride = stride(jdiff_index)
for jj in range(int(j_stride[0] * jdiff_index[0])):
flatten_index = jj
start = 0
for nx in range(nd):
jj_index, rem = divmod(flatten_index, range_stride[nx])
flatten_index = rem
start += (j1_index[nx] + jj_index) * kernel_shape[nx]
w_.append(w[int(start + j1_index[-1]):int(start + j1_index[-1] + j2_index[nd-1] - j1_index[nd-1])])
img = np.array(img)
s = np.dot(
np.array(img).reshape((1, -1)), np.array(w_).reshape((-1, 1))
)[
0, 0
]
else:
img = np.array(img)
s = np.dot(
img.reshape((1, -1)), w.reshape((-1, 1))
)[
0, 0
]
res_index = []
for nx in range(nd):
res_index.append(int(r_index[nx]))
index = tuple([n, nw]) + tuple(res_index)
res[index] += s
return res
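# In every branch the spatial output size follows the usual convolution
# formula out = floor((in - kernel + pad_begin + pad_end) / stride) + 1; e.g.
# the 5x5 input, 3x3 kernel, zero padding and unit strides used in the tests
# below give floor((5 - 3) / 1) + 1 = 3, i.e. a 3x3 output.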
class Conv(RunAll):
@staticmethod
def export_conv_1D_no_padding() -> None:
x = np.array(
[
[
[
0.0, 1.0, 2.0, 3.0, 4.0
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
1.0, 1.0, 1.0
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_1D_no_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_1D_with_padding() -> None:
x = np.array(
[
[
[
0.0, 1.0, 2.0, 3.0, 4.0
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
1.0, 1.0, 1.0
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, pads=[1, 1])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_1D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![1, 1].span()),"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_2D_no_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[0, 0, 0, 0],)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_2D_with_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![1, 1, 1, 1].span()),"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_3D_no_padding() -> None:
x = np.array(
[
[
[
[
[ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24]
],
[
[ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49]
],
[
[ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74]
],
[
[ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99]
],
[
[100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124]
]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
],
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
],
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
]
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_3D_no_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None," |
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_3D_with_padding() -> None:
x = np.array(
[
[
[
[
[ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24]
],
[
[ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49]
],
[
[ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74]
],
[
[ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99]
],
[
[100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124]
]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
],
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
],
[
[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]
]
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_3D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1].span()),"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_4D_no_padding() -> None:
x = np.array(
[
[
[
[
[
[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8]
],
[
[ 9, 10, 11],[12, 13, 14],[15, 16, 17]
],
[
[18, 19, 20],[21, 22, 23],[24, 25, 26]
]
],
[
[
[27, 28, 29],[30, 31, 32],[33, 34, 35]
],
[
[36, 37, 38],[39, 40, 41],[42, 43, 44]
],
[
[45, 46, 47],[48, 49, 50],[51, 52, 53]
]
],
[
[
[54, 55, 56],[57, 58, 59],[60, 61, 62]
],
[
[63, 64, 65],[66, 67, 68],[69, 70, 71]
],
[
[72, 73, 74],[75, 76, 77],[78, 79, 80]
]
]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[
[1., 1.],[1., 1.]
],
[
[1., 1.],[1., 1.]
]
],
[
[
[1., 1.],[1., 1.]
],
[
[1., 1.],[1., 1.]
]
]
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_4D_no_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_4D_with_padding() -> None:
x = np.array(
[
[
[
[
[
[ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8]
],
[
[ 9, 10, 11],[12, 13, 14],[15, 16, 17]
],
[
[18, 19, 20],[21, 22, 23],[24, 25, 26]
]
],
[
[
[27, 28, 29],[30, 31, 32],[33, 34, 35]
],
[
[36, 37, 38],[39, 40, 41],[42, 43, 44]
],
[
[45, 46, 47],[48, 49, 50],[51, 52, 53]
]
],
[
[
[54, 55, 56],[57, 58, 59],[60, 61, 62]
],
[
[63, 64, 65],[66, 67, 68],[69, 70, 71]
],
[
[72, 73, 74],[75, 76, 77],[78, 79, 80]
]
]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[
[1., 1.],[1., 1.]
],
[
[1., 1.],[1., 1.]
]
],
[
[
[1., 1.],[1., 1.]
],
[
[1., 1.],[1., 1.]
]
]
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1, 1, 1])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_4D_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1, 1, 1].span()),"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_with_autopad_same() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, kernel_shape=[3, 3],auto_pad="SAME_LOWER",strides = [2, 2])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_autopad_same"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "O |
ption::Some(AUTO_PAD::SAME_LOWER),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 3].span()),"
func_sig += "Option::None,"
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_with_strides_asymmetric_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 0, 1, 0],strides = [2, 2])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_strides_asymmetric_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 3].span()),"
func_sig += "Option::Some(array![1, 0, 1, 0].span()),"
func_sig += "Option::Som |
e(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_with_strides_with_padding() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],strides = [2, 2])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_strides_with_padding"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 3].span()),"
func_sig += "Option::Some(array![1, 1, 1, 1].span()),"
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_conv_with_2_groups() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]]
]
]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv(x, w, group = 2)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_2D_with_2_groups"
func_sig = "NNTrait::conv("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(2),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def conv_transpose(
X,
W,
B=None,
auto_pad=None,
dilations=None,
group=None,
kernel_shape=None,
output_padding=None,
output_shape=None,
pads=None,
strides=None,
):
if dilations is None:
dilations = [1 for s in X.shape[2:]]
if kernel_shape is None:
kernel_shape = W.shape[2:]
if output_padding is None:
output_padding = [0 for s in X.shape[2:]] * 2
if strides is None:
strides = [1 for s in X.shape[2:]]
if pads is None and auto_pad not in {"SAME_UPPER", "SAME_LOWER"}:
pads = [0 for i in range(2 * len(strides))]
if pads is None:
if output_shape is None:
output_shape = [
X.shape[i + 2] * strides[i] for i in range(len(strides))
]
total_padding = [
strides[i] * (X.shape[i + 2] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- output_shape[i]
for i in range(len(output_shape))
]
pads_1 = []
pads_2 = []
for i in range(len(output_shape)):
if auto_pad == "SAME_UPPER":
pads_1.append(total_padding[i] // 2)
pads_2.append(total_padding[i] - (total_padding[i] // 2))
else:
pads_1.append(total_padding[i] - (total_padding[i] // 2))
pads_2.append(total_padding[i] // 2)
pads = pads_1 + pads_2
n_dims = len(pads)
else:
n_dims = len(X.shape) - 2
new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
if output_shape is None:
output_shape = [
strides[i] * (X.shape[i + 2] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- new_pads[i, :].sum()
for i in range(n_dims)
]
kernel_shape = W.shape[2:]
kernel_size = np.prod(kernel_shape)
num_output_channels = W.shape[1] * group
kernel_dim = num_output_channels // group * kernel_size
C = X.shape[1]
m = kernel_dim
n = np.prod(X.shape[2:])
k = C // group
w_reshaped = W.reshape((group, k, m))
final = None
if group == 1:
for image_id in range(X.shape[0]):
w_t = w_reshaped[0].T
gemm = np.matmul(w_t, X[image_id].reshape((k, n)))
gemmc = gemm.reshape((num_output_channels, -1, gemm.shape[-1]))
for c in range(num_output_channels):
res = col2im_naive_implementation(
gemmc[c], output_shape, kernel_shape, dilations, pads, strides
)
if final is None:
final = np.empty(
X.shape[:1] + (num_output_channels,) + res.shape,
dtype=X.dtype,
)
if B is not None:
res += B[c]
final[image_id, c, ...] = res[...]
else:
final = np.zeros((X.shape[0], num_output_channels ) + tuple(output_shape))
output_array = []
for group_id in range(group):
group_X = X[:, group_id * C // group : (group_id + 1) * C // group, ...]
group_W = W[group_id * num_output_channels // group : (group_id + 1) * num_output_channels // group, ...]
group_output = conv_transpose(
group_X,
group_W,
B=B,
auto_pad=auto_pad,
dilations=dilations,
group=1,
kernel_shape=kernel_shape,
output_padding=output_padding,
output_shape=output_shape,
pads=pads,
strides=strides,
)
group_output = np.array(group_output[0])
output_array.append(group_output)
for image_id in range(X.shape[0]):
for group_id in range(group):
group_output = output_array[group_id]
final[image_id, group_id:(group_id+1), ...] = group_output[image_id, ...]
return (final.astype(X.dtype),)
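# With no explicit output_shape, each spatial dimension follows the
# transposed-convolution formula out = stride * (in - 1) + output_padding
# + ((kernel - 1) * dilation + 1) - pad_total; e.g. the 3x3 input, 3x3 kernel,
# unit strides and zero padding used below give 1 * (3 - 1) + 3 - 0 = 5.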
def _get_indices(i, shape):
res = np.empty((len(shape),), dtype=np.int64)
k = len(shape) - 1
while k > 0:
m = i % shape[k]
res[k] = m
i -= m
i /= shape[k]
k -= 1
res[0] = i
return res
def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides):
n_input_plane = X.shape[0]
kernel_size = np.prod(kernel_shape)
if n_input_plane % kernel_size != 0:
raise ValueError(
f"Expected size of input's dimension 1 to be divisible by the "
f"product of kernel_size={kernel_size}, "
f"but got input.size(1)={n_input_plane} "
f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}."
)
input_length = X.shape[1]
n_dims = len(output_shape)
n_blocks = []
for i in range(n_dims):
n_block = (
output_shape[i]
+ pads[i, :].sum()
- dilations[i] * (kernel_shape[i] - 1)
- 1
        ) // strides[i] + 1
n_blocks.append(n_block)
block_size = np.prod(n_blocks)
if input_length != block_size:
raise ValueError(
f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"expected size of input's dimension 2 to match the calculated number of "
f"sliding blocks {n_blocks} = {block_size}, "
f"but got input.size(2)={input_length}.",
)
def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides):
n_dims = len(pads)
new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
_col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides)
data_col = data
data_im = np.zeros(image_shape, dtype=data.dtype)
dim_col = []
for i in range(n_dims):
        col = (
            image_shape[i]
            + new_pads[i, :].sum()
            - (dilations[i] * (kernel_shape[i] - 1) + 1)
        ) // strides[i] + 1
dim_col.append(col)
kernel_size = np.prod(kernel_shape)
col_size = np.prod(dim_col)
for c_col in range(kernel_size):
offset = _get_indices(c_col, kernel_shape)
for col in range(col_size):
ind_col = _get_indices(col, dim_col)
ind_im = []
for i in range(n_dims):
ind = (
ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i]
)
ind_im.append(ind)
if not _is_out(ind_im, data_im.shape):
data_im[tuple(ind_im)] += data_col[c_col, col]
return data_im
def _is_out(ind, shape):
for i, s in zip(ind, shape):
if i < 0:
return True
if i >= s:
return True
return False |
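# Sanity check for the default 2D case below: a 3x3 input with a 3x3 kernel,
# stride 1 and no padding gives 1 * (3 - 1) + 0 + 3 - 0 = 5 per axis, so a
# (1, 1, 3, 3) input and (1, 2, 3, 3) kernel produce a (1, 2, 5, 5) output.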
class Conv_transpose(RunAll):
@staticmethod
def export_conv_transpose() -> None:
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=1)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_1d() -> None:
x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32)
w = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype(
np.float32
)
y = conv_transpose(x, w, group=1)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_1d"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::No |
ne,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_3d() -> None:
x = np.array(
[
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
],
[
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
[35.0, 36.0, 37.0, 38.0, 39.0],
],
[
[40.0, 41.0, 42.0, 43.0, 44.0],
[45.0, 46.0, 47.0, 48.0, 49.0],
[50.0, 51.0, 52.0, 53.0, 54.0],
[55.0, 56.0, 57.0, 58.0, 59.0],
],
]
]
]
).astype(np.float32)
w = np.array(
[
[
[
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], |
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=1)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_3d"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_attributes() -> None:
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=1)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_attributes"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
        func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_pads() -> None:
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
        y = conv_transpose(x, w, group=1, strides=[3, 2], output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1])[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_pads"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 3].span()),"
func_sig += "Option::Some(array![1, 1].span()),"
func_sig += "Option::Some(array![10, 8].span()),"
func_sig += "Option::None,"
func_sig += "Option::Some(array![3, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_dilations() -> None:
x = np.array(
[[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]]
).astype(np.float32)
w = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32)
y = conv_transpose(x, w, group=1, dilations=[2, 2])[0]
        x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_dilations"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![2, 2].span()),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_autopad_same() -> None:
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=1, auto_pad="SAME_UPPER", strides=[2, 2])[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_autopad_same"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::Some(AUTO_PAD::SAME_UPPER),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None," |
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_group_2() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]]
]
]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=2)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_group_2"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(2),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,)"
make_test(
[x, w], y, func_sig, name, Trait.NN)
@staticmethod
def export_convtranspose_group_2_image_3() -> None:
x = np.array(
[
[
[
[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]
]
], |
[
[
[18.0, 19.0, 20.0], [21.0, 22.0, 23.0], [24.0, 25.0, 26.0]
],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]
]
],
[
[
[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]
],
[
[9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]
]
]
]
).astype(np.float32)
w = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
y = conv_transpose(x, w, group=2)[0]
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "conv_transpose_group_2_image_3"
func_sig = "NNTrait::conv_transpose("
func_sig += "@input_0,"
func_sig += "@input_1,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(2),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None,)"
make_test(
[x, w], y, func_sig, name, Trait.NN) |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Cos(RunAll):
@staticmethod
def cos_fp8x23():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.cos(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cos_fp8x23"
make_test([x], y, "input_0.cos()", name)
@staticmethod
def cos_fp16x16():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.cos(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cos_fp16x16"
make_test([x], y, "input_0.cos()", name)
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Cosh(RunAll):
@staticmethod
def cosh_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.cosh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cosh_fp8x23"
make_test([x], y, "input_0.cosh()", name)
@staticmethod
def cosh_fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.cosh(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cosh_fp16x16"
make_test([x], y, "input_0.cosh()", name)
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
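# The variants mirror ONNX CumSum semantics: `exclusive` shifts the running sum
# so the current element is excluded (the first output is 0), and `reverse`
# accumulates from the end of the axis; for [1, 2, 3, 4, 5] this gives
# exclusive [0, 1, 3, 6, 10] and reverse [15, 14, 12, 9, 5].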
class Cumsum(RunAll):
@staticmethod
def cumsum_u32():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.uint32)
y = np.array([1, 3, 6, 10, 15]).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_1d_default"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.uint32)
y = np.array([0, 1, 3, 6, 10]).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name)
def reverse():
x = np.array([1, 2, 3, 4, 5]).astype(np.uint32)
y = np.array([15, 14, 12, 9, 5]).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_1d_reverse"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name)
def reverse_exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.uint32)
y = np.array([14, 12, 9, 5, 0]).astype(np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_1d_reverse_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name)
default()
exclusive()
reverse()
reverse_exclusive()
cumsum_1D()
        def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.uint32).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.uint32).reshape((2, 3))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.uint32).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype(
np.uint32).reshape((2, 3))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "cumsum_u32_2d_axis_1"
make_test(
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D()
@staticmethod
def cumsum_i32():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.int32)
y = np.array([1, 3, 6, 10, 15]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_1d_default"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int32)
y = np.array([0, 1, 3, 6, 10]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::So |
me(false))", name)
def reverse():
x = np.array([1, 2, 3, 4, 5]).astype(np.int32)
y = np.array([15, 14, 12, 9, 5]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_1d_reverse"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name)
def reverse_exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int32)
y = np.array([14, 12, 9, 5, 0]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_1d_reverse_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name)
default()
exclusive()
reverse()
reverse_exclusive()
cumsum_1D()
def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int32).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.int32).reshape((2, 3))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int32).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype(
np.int32).reshape((2, 3))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "cumsum_i32_2d_axis_1"
make_test( |
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D()
@staticmethod
def cumsum_i8():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.int8)
y = np.array([1, 3, 6, 10, 15]).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_1d_default"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int8)
y = np.array([0, 1, 3, 6, 10]).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name)
def reverse():
x = np.array([1, 2, 3, 4, 5]).astype(np.int8)
y = np.array([15, 14, 12, 9, 5]).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_1d_reverse"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name)
def reverse_exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int8)
y = np.array([14, 12, 9, 5, 0]).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_1d_reverse_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name)
default()
exclusive() |
reverse()
reverse_exclusive()
cumsum_1D()
def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int8).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.int8).reshape((2, 3))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int8).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype(
np.int8).reshape((2, 3))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "cumsum_i8_2d_axis_1"
make_test(
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D()
@staticmethod
def cumsum_fp8x23():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([1, 3, 6, 10, 15]).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_1d_default"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([0, 1, 3, 6, 10]).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
                x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name)
def reverse():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([15, 14, 12, 9, 5]).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_1d_reverse"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name)
def reverse_exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([14, 12, 9, 5, 0]).astype(np.int64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_1d_reverse_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name)
default()
exclusive()
reverse()
reverse_exclusive()
cumsum_1D()
def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int64).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.int64).reshape((2, 3))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x2 |
3_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int64).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype(
np.int64).reshape((2, 3))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "cumsum_fp8x23_2d_axis_1"
make_test(
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D()
@staticmethod
def cumsum_fp16x16():
def cumsum_1D():
def default():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([1, 3, 6, 10, 15]).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_1d_default"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([0, 1, 3, 6, 10]).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_1d_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(false))", name)
def reverse():
                x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([15, 14, 12, 9, 5]).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_1d_reverse"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(false), Option::Some(true))", name)
def reverse_exclusive():
x = np.array([1, 2, 3, 4, 5]).astype(np.int64)
y = np.array([14, 12, 9, 5, 0]).astype(np.int64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_1d_reverse_exclusive"
make_test(
[x], y, "input_0.cumsum(0, Option::Some(true), Option::Some(true))", name)
default()
exclusive()
reverse()
reverse_exclusive()
cumsum_1D()
def cumsum_2D():
def axis_0():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int64).reshape((2, 3))
y = np.array([1, 2, 3, 5, 7, 9]).astype(
np.int64).reshape((2, 3))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_2d_axis_0"
make_test(
[x], y, "input_0.cumsum(0, Option::None(()), Option::None(()))", name)
def axis_1():
x = np.array([1, 2, 3, 4, 5, 6]).astype(
np.int64).reshape((2, 3))
y = np.array([1, 3, 6, 4, 9, 15]).astype( |
np.int64).reshape((2, 3))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "cumsum_fp16x16_2d_axis_1"
make_test(
[x], y, "input_0.cumsum(1, Option::None(()), Option::None(()))", name)
axis_0()
axis_1()
cumsum_2D() |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def depth_to_space(data: np.ndarray, blocksize: int = 2, mode="DCR") -> np.ndarray:
if len(data.shape) != 4:
raise RuntimeError(f"Unexpected shape {data.shape!r}.")
b, c, h, w = data.shape
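    # DCR (depth-column-row) draws each output channel from input channels
    # strided across the depth, so channels are interleaved; CRD
    # (column-row-depth) instead takes each group of blocksize**2 consecutive
    # input channels together.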
if mode == "DCR":
tmpshape = (
b,
blocksize,
blocksize,
            c // (blocksize * blocksize),
h,
w,
)
reshaped = data.reshape(tmpshape)
transposed = np.transpose(reshaped, [0, 3, 4, 1, 5, 2])
else:
tmpshape = (
b,
            c // (blocksize * blocksize),
blocksize,
blocksize,
h,
w,
)
reshaped = data.reshape(tmpshape)
transposed = np.transpose(reshaped, [0, 1, 4, 2, 5, 3])
finalshape = (
b,
        c // (blocksize * blocksize),
h * blocksize,
w * blocksize,
)
y = np.reshape(transposed, finalshape)
return y |
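# Worked DCR example: for data = np.arange(16).reshape(1, 4, 2, 2) and
# blocksize = 2, the four 2x2 channels interleave into one (1, 1, 4, 4) channel:
# [[ 0,  4,  1,  5],
#  [ 8, 12,  9, 13],
#  [ 2,  6,  3,  7],
#  [10, 14, 11, 15]]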
class Depth_to_space(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float64)
y = depth_to_space(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "depth_to_space_fp8x23"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
name, Trait.NN)
@staticmethod
def fp16x16():
x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float16)
y = depth_to_space(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "depth_to_space_fp16x16"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
name, Trait.NN)
@staticmethod
def fpi8():
x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int8)
y = depth_to_space(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "depth_to_space_i8"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')",
name, Trait.NN)
@staticmethod
def fpi32():
x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int32)
        y = depth_to_space(x, mode="CRD")
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "depth_to_space_i32"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')",
name, Trait.NN)
@staticmethod
def fpu32():
x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.uint32)
        y = depth_to_space(x, mode="CRD")
x = Tensor(Dtype.U32, x.shape, x.flatten())
        y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "depth_to_space_u32"
make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')",
name, Trait.NN) |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
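# Reference outputs use NumPy's true division, which returns float64 even for
# the integer input dtypes below.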
class Div(RunAll):
@staticmethod
def div_u32():
def default():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.uint32)
z = x / y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "div_u32"
make_test([x, y], z, "input_0 / input_1", name)
def broadcast():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(1, 3, (1, 3, 1)).astype(np.uint32)
z = x / y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "div_u32_broadcast"
make_test([x, y], z, "input_0 / input_1", name)
default()
broadcast()
@staticmethod
def div_i32():
def default():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
z = x / y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "div_i32"
make_test([x, y], z, "input_0 / input_1", name)
def broadcast():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(1, 3, (1, 3, 1)).astype(np.int32)
z = x / y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "div_i32_broadcast"
make_test([x, y], z, "input_0 / input_1", name)
default()
broadcast()
@staticmethod
def div_i8():
def default():
            x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
z = x / y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "div_i8"
make_test([x, y], z, "input_0 / input_1", name)
def broadcast():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(1, 3, (1, 3, 1)).astype(np.int8)
z = x / y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "div_i8_broadcast"
make_test([x, y], z, "input_0 / input_1", name)
default()
broadcast()
@staticmethod
def div_fp8x23():
def default():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
z = x / y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "div_fp8x23"
make_test([x, y], z, "input_0 / input_1", name)
def broadcast():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(1, 3, (1, 3, 1)).astype(np.float64)
z = x / y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "div_fp8x23_broadcast"
            make_test([x, y], z, "input_0 / input_1", name)
default()
broadcast()
@staticmethod
def div_fp16x16():
def default():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
z = x / y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "div_fp16x16"
make_test([x, y], z, "input_0 / input_1", name)
def broadcast():
x = np.random.randint(1, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(1, 3, (1, 3, 1)).astype(np.float64)
z = x / y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "div_fp16x16_broadcast"
make_test([x, y], z, "input_0 / input_1", name)
default()
broadcast() |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
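# np.equal returns boolean arrays; the expected outputs are stored as U32
# tensors of 0/1 regardless of the input dtype.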
class Equal(RunAll):
@staticmethod
def equal_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_u32"
make_test([x, y], z, "input_0.equal(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.equal(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_u32_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast()
@staticmethod
def equal_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_i32"
make_test([x, y], z, "input_0.equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.equal(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_i32_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast()
@staticmethod |
def equal_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_i8"
make_test([x, y], z, "input_0.equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.equal(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_i8_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast()
@staticmethod
def equal_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.equal(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_fp8x23"
make_test([x, y], z, "input_0.equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.equal(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "e |
qual_fp8x23_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast()
@staticmethod
def equal_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_fp16x16"
make_test([x, y], z, "input_0.equal(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.equal(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "equal_fp16x16_broadcast"
make_test([x, y], z, "input_0.equal(@input_1)", name)
default()
broadcast() |
import numpy as np
from math import erf
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Erf(RunAll):
@staticmethod
def erf_fp8x23():
x = np.asarray([0.12, -1.66, 3.4, 4.8, 2.7]).astype(np.float64).reshape(1,5)
y = np.asarray([erf(value) for value in x[0]]).astype(np.float64).reshape(1,5)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "erf_fp8x23"
make_test([x], y, "input_0.erf()", name)
@staticmethod
def erf_fp16x16():
x = np.asarray([0.12, -1.66, 3.4, 4.8, 2.7]).astype(np.float64).reshape(1,5)
y = np.asarray([erf(value) for value in x[0]]).astype(np.float64).reshape(1,5)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "erf_fp16x16"
make_test([x], y, "input_0.erf()", name)
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Exp(RunAll):
@staticmethod
def exp_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.exp(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "exp_fp8x23"
make_test([x], y, "input_0.exp()", name)
@staticmethod
def exp_fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.exp(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "exp_fp16x16"
make_test([x], y, "input_0.exp()", name)
|
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
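# gather mirrors np.take along a given axis; negative indices and a negative
# axis wrap around, as exercised by the last two cases below.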
class Gather(RunAll):
@staticmethod
def gather_fp16x16():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32)
y = x1.take(x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_fp16x16_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64)
y = x1.take(x2, axis=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_fp16x16_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64)
y = x1.take(x2, axis=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_fp16x16_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", |
name= name)
def negative_indices():
x1 = np.arange(10).astype(np.float32)
x2 = np.array([0, -9, -10]).astype(np.int64)
y = np.take(x1, x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_negative_indices"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))",
name= name)
def negative_axis():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32)
y = x1.take(x2, axis=-1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "gather_negative_axis"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(-1))",
name= name)
default()
axis1()
axis2()
negative_indices()
negative_axis() |
import numpy as np
from nodegen.node import RunAll
from ..helpers |