file_path | content | repo
---|---|---|
tests/python/unittest/test_target_codegen_hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
import pytest
import re
import sys
import tvm
import tvm.relay
import tvm.testing
import tvm.contrib.hexagon as hexagon
@pytest.fixture(autouse=True)
def register_linker():
original_linker = hexagon.hexagon_link()
# Register a phony linker, so that we can test codegen without a Hexagon toolchain.
hexagon.register_linker(lambda: "/bin/true")
yield None
# Restore registration.
hexagon.register_linker(original_linker)
@tvm.testing.requires_hexagon
def test_basic():
target = tvm.target.hexagon("v66", hvx=128)
def check_add(offload):
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
        B = tvm.te.placeholder((128,), dtype="uint8", name="B")
C = tvm.te.compute((128,), lambda i: A[i] + B[i], name="C")
s = tvm.te.create_schedule(C.op)
if offload:
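            # Binding to the "pipeline" thread axis marks this extent for device
            # offload, so the Hexagon code lands in m.imported_modules[0] below.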
xo, xi = s[C].split(s[C].op.axis[0], nparts=1)
s[C].bind(xo, tvm.te.thread_axis("pipeline"))
m = tvm.build(s, [C, A, B], target=target, name="offload_add")
hexm = m.imported_modules[0]
else:
hexm = tvm.build(
s, [C, A, B], target=tvm.target.Target(target, target), name="native_add"
)
asm = hexm.get_source("s")
vadds = re.findall(r"v[0-9]+.b = vadd\(v[0-9]+.b,v[0-9]+.b\)", asm)
assert vadds # Check that it's non-empty
check_add(True)
check_add(False)
@tvm.testing.requires_hexagon
def test_llvm_target_features():
target = tvm.target.hexagon("v66", hvx=128)
# Define some trivial compute
A = tvm.te.placeholder((128,), dtype="uint8", name="A")
C = tvm.te.compute((128,), lambda i: A[i] + 1, name="C")
s = tvm.te.create_schedule(C.op)
m = tvm.build(s, [C, A], target=tvm.target.Target(target, target), name="add_one")
llvm_ir = m.get_source("ll")
# Make sure we find +hvx-length128b in "attributes".
fs = re.findall(r"attributes.*\+hvx-length128b", llvm_ir)
assert fs # Check that it's non-empty
@tvm.testing.requires_hexagon
def test_alloc_vtcm():
target = tvm.target.hexagon("v66")
buf_len = 2048
A = tvm.te.placeholder((buf_len,), name="A", dtype="int8")
B = tvm.te.placeholder((buf_len,), name="B", dtype="int8")
A_buf = tvm.te.compute((buf_len,), lambda *i: A(*i), "A_buf")
B_buf = tvm.te.compute((buf_len,), lambda *i: B(*i), "B_buf")
C = tvm.te.compute((buf_len,), lambda *i: A_buf(*i) + B_buf(*i), name="C")
s = tvm.te.create_schedule(C.op)
# Use VTCM for each buffer.
s[A_buf].set_scope("local.vtcm")
s[B_buf].set_scope("local.vtcm")
config = {"tir.add_lower_pass": hexagon.ir_lower_vtcm_pass()}
with tvm.transform.PassContext(config=config):
irmod = tvm.lower(s, [A, B, C], name="alloc_vtcm")
calls = re.findall("HexagonBackend[A-Za-z]*VTCM", str(irmod["alloc_vtcm"]))
assert "HexagonBackendAllocateVTCM" in calls
assert "HexagonBackendFreeVTCM" in calls
@tvm.testing.requires_hexagon
def test_llvm_options():
target = tvm.target.hexagon("v66", llvm_options="-hexagon-noopt")
Zero = tvm.te.compute((10,), lambda _: tvm.tir.const(0, "int32"))
s = tvm.te.create_schedule(Zero.op)
tvm.build(s, [Zero], target=target, name="zero")
# Check that BuildHexagon hasn't crashed because of target attribute
# type mismatch.
assert re.search("-hexagon-noopt", str(target))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_llvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import ctypes
import json
import math
import numpy as np
import pytest
import re
import sys
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import clang, utils
from tvm.relay.backend import Runtime
from tvm.script import tir as T
from tvm.target.codegen import llvm_get_intrinsic_name, llvm_lookup_intrinsic_id
@tvm.testing.requires_llvm
def test_llvm_intrin():
ib = tvm.tir.ir_builder.create()
n = tvm.runtime.convert(4)
A = ib.pointer("float32", name="A")
args = [tvm.tir.call_intrin("handle", "tir.address_of", A[0]), 0, 3, 1]
ib.emit(tvm.tir.Evaluate(tvm.tir.Call("int32", "tir.prefetch", args)))
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "prefetch"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_void_intrin():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("uint8", name="A")
# Create an intrinsic that returns void.
x = tvm.tir.call_llvm_intrin("", "llvm.va_start", tvm.tir.const(1, "uint32"), A.asobject().data)
ib.emit(x)
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "main"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_intrinsic_id():
orig_name = "llvm.x86.sse2.pmadd.wd"
intrin_id = llvm_lookup_intrinsic_id(orig_name)
name = llvm_get_intrinsic_name(intrin_id)
assert orig_name == name
@tvm.testing.requires_llvm
def test_llvm_overloaded_intrin():
    # Name lookup for overloaded intrinsics in LLVM 4 and earlier requires a name
    # that includes the overloaded types.
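    # For example, "llvm.ctlz.i32" instead of the plain "llvm.ctlz" used below.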
if tvm.target.codegen.llvm_version_major() < 5:
return
def use_llvm_intrinsic(A, C):
ib = tvm.tir.ir_builder.create()
L = A.vload((0, 0))
I = tvm.tir.call_llvm_pure_intrin(
"int32", "llvm.ctlz", tvm.tir.const(2, "uint32"), L, tvm.tir.const(0, "int1")
)
S = C.vstore((0, 0), I)
ib.emit(S)
return ib.get()
A = tvm.te.placeholder((1, 1), dtype="int32", name="A")
C = tvm.te.extern(
(1, 1), [A], lambda ins, outs: use_llvm_intrinsic(ins[0], outs[0]), name="C", dtype="int32"
)
s = tvm.te.create_schedule(C.op)
f = tvm.build(s, [A, C], target="llvm")
@tvm.testing.requires_llvm
def test_llvm_lookup_intrin():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("uint8x8", name="A")
z = tvm.tir.const(0, "int32")
x = tvm.tir.call_llvm_pure_intrin(
"uint8x8", "llvm.ctpop.v8i8", tvm.tir.const(1, "uint32"), A[z]
)
ib.emit(x)
body = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "main"))
fcode = tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_large_uintimm():
value = (1 << 63) + 123
other = tvm.tir.const(3, "uint64")
A = te.compute((), lambda: tvm.tir.const(value, "uint64") + other, name="A")
s = te.create_schedule(A.op)
def check_llvm():
f = tvm.build(s, [A], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.empty((), dtype=A.dtype, device=dev)
f(a)
assert a.numpy() == value + 3
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_persist_parallel():
n = 128
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
C = te.compute(A.shape, lambda *i: te.sqrt(B(*i)) * 2 + 2, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=8)
xo1, xo2 = s[C].split(xo, nparts=1)
s[B].compute_at(s[C], xo1)
s[B].parallel(s[B].op.axis[0])
s[B].pragma(s[B].op.axis[0], "parallel_barrier_when_finish")
s[C].parallel(xi)
s[C].pragma(xo1, "parallel_launch_point")
s[C].pragma(xi, "parallel_stride_pattern")
def check_llvm():
        # build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), np.sqrt(a.numpy() + 1) * 2 + 2, rtol=1e-5)
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_flip_pipeline():
def check_llvm(nn, base):
n = tvm.runtime.convert(nn)
A = te.placeholder((n + base), name="A")
C = te.compute((n,), lambda i: A(nn + base - i - 1), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=(n + base)).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy()[::-1][:n])
check_llvm(4, 0)
check_llvm(128, 8)
check_llvm(3, 0)
check_llvm(128, 1)
@tvm.testing.requires_llvm
def test_llvm_vadd_pipeline():
def check_llvm(n, lanes):
A = te.placeholder((n,), name="A", dtype="float32x%d" % lanes)
B = te.compute((n,), lambda i: A[i], name="B")
C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], nparts=2)
_, xi = s[C].split(xi, factor=2)
s[C].parallel(xo)
s[C].vectorize(xi)
s[B].compute_at(s[C], xo)
xo, xi = s[B].split(B.op.axis[0], factor=2)
s[B].vectorize(xi)
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.empty((n,), A.dtype).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), C.dtype, dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_llvm(64, 2)
check_llvm(512, 2)
@tvm.testing.requires_llvm
def test_llvm_madd_pipeline():
def check_llvm(nn, base, stride):
n = tvm.runtime.convert(nn)
A = te.placeholder((n + base, stride), name="A")
C = te.compute((n, stride), lambda i, j: A(base + i, j) + 1, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=(n + base, stride)).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros((n, stride), dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy()[base:] + 1)
check_llvm(64, 0, 2)
check_llvm(4, 0, 1)
with tvm.transform.PassContext(config={"tir.noalias": False}):
check_llvm(4, 0, 3)
@tvm.testing.requires_llvm
def test_llvm_temp_space():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A(i) + 1, name="B")
C = te.compute(A.shape, lambda i: B(i) + 1, name="C")
s = te.create_schedule(C.op)
def check_llvm():
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
f(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1 + 1)
check_llvm()
@tvm.testing.requires_llvm
def test_multiple_func():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm():
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], "llvm")
fadd2 = m["fadd2"]
fadd1 = m["fadd1"]
dev = tvm.cpu(0)
# launch the kernel.
n = nn
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd1(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
fadd2(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_llvm()
@tvm.testing.requires_llvm
def test_llvm_condition():
def check_llvm(n, offset):
A = te.placeholder((n,), name="A")
C = te.compute((n,), lambda i: tvm.tir.if_then_else(i >= offset, A[i], 0.0), name="C")
s = te.create_schedule(C.op)
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev)
c = tvm.nd.empty((n,), A.dtype, dev)
f(a, c)
c_np = a.numpy()
c_np[:offset] = 0
tvm.testing.assert_allclose(c.numpy(), c_np)
check_llvm(64, 8)
@tvm.testing.requires_llvm
def test_llvm_bool():
def check_llvm(n):
A = te.placeholder((n,), name="A", dtype="int32")
C = te.compute((n,), lambda i: A[i].equal(1).astype("float"), name="C")
s = te.create_schedule(C.op)
# build and invoke the kernel.
f = tvm.build(s, [A, C], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
c = tvm.nd.empty((n,), C.dtype, dev)
f(a, c)
c_np = a.numpy() == 1
tvm.testing.assert_allclose(c.numpy(), c_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_rank_zero():
def check_llvm(n):
A = te.placeholder((n,), name="A")
scale = te.placeholder((), name="scale")
k = te.reduce_axis((0, n), name="k")
C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
D = te.compute((), lambda: C() + 1)
s = te.create_schedule(D.op)
# build and invoke the kernel.
f = tvm.build(s, [A, scale, D], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
d = tvm.nd.empty((), D.dtype, dev)
f(a, sc, d)
d_np = np.sum(a.numpy()) * sc.numpy() + 1
tvm.testing.assert_allclose(d.numpy(), d_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_rank_zero_bound_checkers():
def check_llvm(n):
with tvm.transform.PassContext(config={"tir.instrument_bound_checkers": True}):
A = te.placeholder((n,), name="A")
scale = te.placeholder((), name="scale")
k = te.reduce_axis((0, n), name="k")
C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
D = te.compute((), lambda: C() + 1)
s = te.create_schedule(D.op)
# build and invoke the kernel.
f = tvm.build(s, [A, scale, D], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
d = tvm.nd.empty((), D.dtype, dev)
f(a, sc, d)
d_np = np.sum(a.numpy()) * sc.numpy() + 1
tvm.testing.assert_allclose(d.numpy(), d_np)
check_llvm(64)
@tvm.testing.requires_llvm
def test_alignment():
n = tvm.runtime.convert(1024)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] * 3, name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=8)
s[B].vectorize(tx)
f = tvm.build(s, [A, B], "llvm", name="test_alignment")
lines = f.get_source().split("\n")
# Check alignment on load/store.
for l in lines:
if "align" in l and "4 x float" in l:
assert "align 32" in l
# Check parameter alignment. This looks for the definition of the
# outlined "compute_" function to see if there is an "align" attribute
# listed there.
def has_param_alignment():
for l in lines:
if re.search(r"test_alignment_compute_\([^(]*align [0-9]", l):
return True
return False
if tvm.target.codegen.llvm_version_major() >= 5:
assert has_param_alignment()
    # Check for assume intrinsics. This isn't 100% accurate, since it just
    # checks that llvm.assume is present; a precise check would require
    # a much more detailed analysis of the LLVM IR.
def has_call_to_assume():
for l in lines:
if re.search(r"call.*llvm.assume", l):
return True
return False
assert has_call_to_assume()
@tvm.testing.requires_llvm
def test_llvm_div():
"""Check that the semantics of div and mod is correct"""
def check(start, end, dstart, dend, dtype, floor_div=False):
div = tvm.te.floordiv if floor_div else tvm.tir.truncdiv
mod = tvm.te.floormod if floor_div else tvm.tir.truncmod
        # A are dividends, B are divisors. Note that we add 1 so that the end is included in the range.
A = te.placeholder((end - start + 1,), name="A", dtype=dtype)
B = te.placeholder((dend - dstart + 1,), name="B", dtype=dtype)
# We clip values with min and max so that simplifiers know the ranges of values
def clipa(x):
return tvm.te.min(tvm.tir.const(end, dtype), tvm.te.max(tvm.tir.const(start, dtype), x))
def clipb(x):
return tvm.te.min(
tvm.tir.const(dend, dtype), tvm.te.max(tvm.tir.const(dstart, dtype), x)
)
# If the range is just a single point, use the constant itself
if start == end:
def clipa(x):
return tvm.tir.const(start, dtype)
if dstart == dend:
def clipb(x):
return tvm.tir.const(dstart, dtype)
# D are division results and M are modulo results
[D, M] = te.compute(
(end - start + 1, dend - dstart + 1),
lambda i, j: (div(clipa(A[i]), clipb(B[j])), mod(clipa(A[i]), clipb(B[j]))),
)
s = te.create_schedule([D.op, M.op])
f = tvm.build(s, [A, B, D, M], "llvm")
# Fill input arrays with values
A_arr = tvm.nd.empty((end - start + 1,), dtype)
B_arr = tvm.nd.empty((dend - dstart + 1,), dtype)
A_arr.copyfrom(np.arange(start, end + 1, dtype=dtype))
B_np = np.arange(dstart, dend + 1, dtype=dtype)
# If the range of the divisor contains 0, replace it with 1 to avoid division by zero
if dend >= 0 and dstart <= 0:
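            # np.arange(dstart, dend + 1) places the value 0 at index -dstart.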
B_np[-dstart] = 1
B_arr.copyfrom(B_np)
D_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
M_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
# Run the function and convert the results to numpy
f(A_arr, B_arr, D_arr, M_arr)
D_arr = D_arr.numpy()
M_arr = M_arr.numpy()
# This helper just prints additional info on failure
def _show_info():
print("dtype: {}".format(dtype))
print("dividend range: [{}, {}]".format(start, end))
print("divisor range: [{}, {}]".format(dstart, dend))
lowered = tvm.lower(s, [A, B, D, M], simple_mode=True)
print("Lowered code:")
print(lowered)
# Check that the computed values are correct
for i in range(start, end + 1):
for j in range(dstart, dend + 1):
if j == 0:
continue
if floor_div:
dref = i // j
mref = i % j
else:
dref = int(float(i) / j)
mref = int(math.fmod(i, j))
if D_arr[i - start, j - dstart] != dref:
_show_info()
raise AssertionError(
"Incorrect division result: {}({}, {}) is {} "
"but should be {}".format(
div.__name__, i, j, D_arr[i - start, j - dstart], dref
)
)
if M_arr[i - start, j - dstart] != mref:
_show_info()
raise AssertionError(
"Incorrect modulo result: {}({}, {}) is {} "
"but should be {}".format(
mod.__name__, i, j, M_arr[i - start, j - dstart], mref
)
)
# Try different ranges to cover different cases
for start, end in [
(-12, -12),
(-11, -1),
(-11, 0),
(0, 0),
(12, 12),
(1, 11),
(0, 11),
(-11, 11),
]:
for dstart, dend in [
(-11, -1),
(-11, 0),
(-4, -4),
(-2, -2),
(1, 11),
(0, 11),
(4, 4),
(2, 2),
(-11, 11),
]:
if end < start or dend < dstart or (dend == 0 and dstart == 0):
continue
check(start, end, dstart, dend, "int32", floor_div=False)
check(start, end, dstart, dend, "int32", floor_div=True)
check(start, end, dstart, dend, "int8", floor_div=False)
check(start, end, dstart, dend, "int8", floor_div=True)
if start >= 0 and dstart >= 0:
check(start, end, dstart, dend, "uint32", floor_div=False)
check(start, end, dstart, dend, "uint32", floor_div=True)
# Additional tests for uint8
for dstart, dend in [(0, 11), (1, 11), (2, 2), (4, 4)]:
check(123, 133, dstart, dend, "uint8", floor_div=False)
check(123, 133, dstart, dend, "uint8", floor_div=True)
check(0, 255, dstart, dend, "uint8", floor_div=False)
check(0, 255, dstart, dend, "uint8", floor_div=True)
@tvm.testing.requires_llvm
def test_llvm_fp_math():
def check_llvm_reciprocal(n):
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: te.div(1.0, (1e37 * A[i])), name="B")
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.full((n,), 100, "float32"))
b = tvm.nd.empty((n,), "float32")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32"))
check_llvm_reciprocal(4)
check_llvm_reciprocal(8)
check_llvm_reciprocal(16)
def check_llvm_sigmoid(n):
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: te.sigmoid(A[i]), name="B")
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.full((n,), -1000, "float32"))
b = tvm.nd.empty((n,), "float32")
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np.zeros((n,), "float32"))
check_llvm_sigmoid(4)
check_llvm_sigmoid(8)
check_llvm_sigmoid(16)
@tvm.testing.requires_llvm
def test_dwarf_debug_information():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=4)
s[C].parallel(xo)
s[C].vectorize(xi)
def check_llvm_object():
if tvm.target.codegen.llvm_version_major() < 5:
return
if tvm.target.codegen.llvm_version_major() > 6:
return
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], "llvm")
temp = utils.tempdir()
o_path = temp.relpath("temp.o")
m.save(o_path)
import shutil
import subprocess
import sys
# Try the dwarfdump utility (OS X)
if shutil.which("dwarfdump"):
output = subprocess.check_output(["dwarfdump", o_path])
assert re.search(r"""DW_AT_name\\t\("fadd1"\)""", str(output))
assert re.search(r"""DW_AT_name\\t\("fadd2"\)""", str(output))
# Try gobjdump (OS X)
if shutil.which("gobjdump"):
output = subprocess.check_output(["gobjdump", "--dwarf", o_path])
assert re.search(r"""DW_AT_name.*fadd1""", str(output))
assert re.search(r"""DW_AT_name.*fadd2""", str(output))
# Try objdump (Linux) - Darwin objdump has different DWARF syntax.
if shutil.which("objdump") and sys.platform != "darwin":
output = subprocess.check_output(["objdump", "--dwarf", o_path])
assert re.search(r"""DW_AT_name.*fadd1""", str(output))
assert re.search(r"""DW_AT_name.*fadd2""", str(output))
def check_llvm_ir():
if tvm.target.codegen.llvm_version_major() < 5:
return
if tvm.target.codegen.llvm_version_major() > 6:
return
# build two functions
f2 = tvm.lower(s, [A, B, C], name="fadd1")
f1 = tvm.lower(s, [A, B, C], name="fadd2")
m = tvm.build([f1, f2], target="llvm -mtriple=aarch64-linux-gnu")
ll = m.get_source("ll")
# On non-Darwin OS, don't explicitly specify DWARF version.
import re
        assert not re.search(r'"Dwarf Version"', ll)
assert re.search(r"""llvm.dbg.value""", ll)
# Try Darwin, require DWARF-2
m = tvm.build([f1, f2], target="llvm -mtriple=x86_64-apple-darwin-macho")
ll = m.get_source("ll")
assert re.search(r"""i32 4, !"Dwarf Version", i32 2""", ll)
assert re.search(r"""llvm.dbg.value""", ll)
check_llvm_object()
check_llvm_ir()
@tvm.testing.requires_llvm
def test_llvm_shuffle():
a = te.placeholder((8,), "int32")
b = te.placeholder((8,), "int32")
c = te.compute((8,), lambda x: a[x] + b[7 - x])
sch = te.create_schedule(c.op)
def my_vectorize():
def vectorizer(op):
store = op.body
idx = tvm.tir.Ramp(tvm.tir.const(0, "int32"), tvm.tir.const(1, "int32"), 8)
value = store.value
b_idx = tvm.tir.Shuffle([idx], [tvm.tir.const(i, "int32") for i in range(7, -1, -1)])
new_a = tvm.tir.BufferLoad(value.a.buffer, [idx])
new_b = tvm.tir.BufferLoad(value.b.buffer, [b_idx])
value = new_a + new_b
            return tvm.tir.BufferStore(store.buffer, value, [idx])
def _transform(f, *_):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
)
return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="my_vectorize")
with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, my_vectorize())]}):
ir = tvm.lower(sch, [a, b, c], simple_mode=True)
module = tvm.build(sch, [a, b, c])
a_ = tvm.nd.array(np.arange(1, 9, dtype="int32"))
b_ = tvm.nd.array(np.arange(8, 0, -1, dtype="int32"))
c_ = tvm.nd.array(np.zeros((8,), dtype="int32"))
module(a_, b_, c_)
tvm.testing.assert_allclose(c_.numpy(), (a_.numpy() * 2).astype("int32"))
def np_float2np_bf16(arr):
"""Convert a numpy array of float to a numpy array
of bf16 in uint16"""
orig = arr.view("<u4")
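    # Round to nearest even: a bias of 0x7FFF plus the lowest retained bit makes
    # ties round toward the value whose truncated mantissa is even.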
bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
return np.right_shift(orig + bias, 16).astype("uint16")
def np_float2tvm_bf16(arr):
"""Convert a numpy array of float to a TVM array
of bf16"""
nparr = np_float2np_bf16(arr)
return tvm.nd.empty(nparr.shape, "uint16").copyfrom(nparr)
def np_bf162np_float(arr):
"""Convert a numpy array of bf16 (uint16) to a numpy array
of float"""
u32 = np.left_shift(arr.astype("uint32"), 16)
return u32.view("<f4")
def np_bf16_cast_and_cast_back(arr):
"""Convert a numpy array of float to bf16 and cast back"""
return np_bf162np_float(np_float2np_bf16(arr))
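# A minimal, hypothetical sanity check for the helpers above (the name
# _bf16_helpers_smoke_test is ours, and the leading underscore keeps pytest
# from collecting it). bf16 keeps only 7 explicit mantissa bits, so an
# increment of 2^-9 relative to 1.0 (a quarter of a bf16 ulp) should round
# away under the round-to-nearest-even conversion implemented above.
def _bf16_helpers_smoke_test():
    x = np.array([np.float32(1.0) + np.float32(2.0) ** -9], "float32")
    assert np_bf16_cast_and_cast_back(x)[0] == np.float32(1.0)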
@tvm.testing.requires_llvm
def test_llvm_bf16():
def dotest(do_vectorize):
np.random.seed(122)
A = te.placeholder((32,), dtype="bfloat16")
B = te.placeholder((32,), dtype="bfloat16")
d = te.compute((32,), lambda x: A[x] + B[x])
sch = te.create_schedule(d.op)
print(tvm.lower(sch, [A, B, d]))
if do_vectorize:
sch[d].vectorize(d.op.axis[0])
module = tvm.build(sch, [A, B, d])
npa = np.random.rand(32).astype("float32")
npb = np.random.rand(32).astype("float32")
va = np_bf16_cast_and_cast_back(npa)
vb = np_bf16_cast_and_cast_back(npb)
res = np_bf16_cast_and_cast_back(va + vb)
a_ = np_float2tvm_bf16(npa)
b_ = np_float2tvm_bf16(npb)
c_ = tvm.nd.empty((32,), "uint16")
module(a_, b_, c_)
tvm.testing.assert_allclose(np_bf162np_float(c_.numpy()), res)
dotest(True)
dotest(False)
@tvm.testing.requires_llvm
def test_llvm_crt_static_lib():
A = te.placeholder((32,), dtype="bfloat16")
B = te.placeholder((32,), dtype="bfloat16")
d = te.compute((32,), lambda x: A[x] + B[x])
sch = te.create_schedule(d.op)
module = tvm.build(
sch,
[A, B, d],
target=tvm.target.Target("llvm"),
runtime=Runtime("crt", {"system-lib": True}),
)
print(module.get_source())
module.save("test.o")
def atomic_add(x, y):
return tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
@tvm.testing.requires_llvm
def test_llvm_lower_atomic():
def do_atomic_add(A):
ib = tvm.tir.ir_builder.create()
n = A.shape[0]
atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
one = tvm.tir.const(1, A.dtype)
A_ptr = ib.buffer_ptr(A)
with ib.for_range(0, n, name="i", kind="parallel") as i:
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
)
return ib.get()
A = tvm.te.placeholder((100,), dtype="int32", name="A")
C = tvm.te.extern((100,), [A], lambda ins, _: do_atomic_add(ins[0]), name="C", dtype="int32")
s = tvm.te.create_schedule(C.op)
# This does not work because of pointer type mismatch
# TVMError: LLVM module verification failed with the following errors:
# Argument value type does not match pointer operand type!
# %21 = atomicrmw add i8* %7, i32 1 monotonic
# i8
# f = tvm.build(s, [A], target="llvm")
@tvm.testing.requires_llvm
@tvm.testing.requires_gpu
def test_llvm_gpu_lower_atomic():
def do_atomic_add(A):
ib = tvm.tir.ir_builder.create()
n = A.shape[0]
atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
one = tvm.tir.const(1, A.dtype)
A_ptr = ib.buffer_ptr(A)
nthread_tx = 64
with ib.new_scope():
nthread_bx = (n + nthread_tx - 1) // nthread_tx
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
atomic_add_return[0] = atomic_add(
tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
)
return ib.get()
size = 1024
# CI uses LLVM 8, which does not support float atomic
for dtype in ["int32"]:
A = tvm.te.placeholder((size,), dtype=dtype, name="A")
C = tvm.te.extern((size,), [A], lambda ins, _: do_atomic_add(ins[0]), dtype=dtype)
s = tvm.te.create_schedule(C.op)
f = tvm.build(s, [A], target="nvptx")
dev = tvm.cuda()
a = tvm.nd.array(np.zeros((size,)).astype(A.dtype), dev)
f(a)
ref = np.zeros((size,)).astype(A.dtype)
ref[0] = size
tvm.testing.assert_allclose(a.numpy(), ref, rtol=1e-5)
@tvm.testing.requires_llvm
def test_llvm_order_functions():
"""Check that functions in the LLVM module are ordered alphabetically."""
# Note: the order is alphabetical because that's a predictable ordering. Any predictable
# ordering will work fine, but if the ordering changes, this test will need to be updated.
def make_call_extern(caller, callee):
# Create a function:
# float32 caller(float32 v) { return callee(v); }
ib = tvm.tir.ir_builder.create()
v = tvm.te.var("v", dtype="float32")
t = tvm.tir.call_extern("float32", callee, v)
ib.emit(t)
return tvm.tir.PrimFunc([v], ib.get()).with_attr("global_symbol", caller)
# Create some functions in a random order.
functions = {
"Danny": make_call_extern("Danny", "Dave"),
"Sammy": make_call_extern("Sammy", "Eve"),
"Kirby": make_call_extern("Kirby", "Fred"),
}
mod = tvm.IRModule(functions=functions)
ir_text = tvm.build(mod, None, target="llvm").get_source("ll")
# Skip functions whose names start with _.
matches = re.findall(r"^define[^@]*@([a-zA-Z][a-zA-Z0-9_]*)", ir_text, re.MULTILINE)
assert matches == sorted(matches)
@tvm.testing.requires_llvm
@tvm.testing.skip_if_32bit
def test_llvm_import():
"""all-platform-minimal-test: check shell dependent clang behavior."""
# extern "C" is necessary to get the correct signature
cc_code = """
extern "C" float my_add(float x, float y) {
return x + y;
}
"""
n = 10
A = te.placeholder((n,), name="A")
B = te.compute(
(n,), lambda *i: tvm.tir.call_pure_extern("float32", "my_add", A(*i), 1.0), name="B"
)
def check_llvm(use_file):
if not clang.find_clang(required=False):
print("skip because clang is not available")
return
temp = utils.tempdir()
ll_path = temp.relpath("temp.ll")
ll_code = clang.create_llvm(cc_code, output=ll_path)
s = te.create_schedule(B.op)
if use_file:
s[B].pragma(s[B].op.axis[0], "import_llvm", ll_path)
else:
s[B].pragma(s[B].op.axis[0], "import_llvm", ll_code)
        # build and invoke the kernel.
f = tvm.build(s, [A, B], "llvm")
dev = tvm.cpu(0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1.0)
check_llvm(use_file=True)
check_llvm(use_file=False)
@tvm.testing.requires_llvm
def test_llvm_scalar_concat():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
z = tvm.tir.decl_buffer((1,), "int32x2")
s = tvm.tir.Shuffle([x, y], [0, 1])
f = tvm.tir.PrimFunc([x, y, z], z.vstore(0, s))
mod = tvm.ir.IRModule.from_expr(f.with_attr("global_symbol", "codegen_scalar_concat"))
# This will crash in LLVM codegen if CodeGenLLVM::CreateVecConcat doesn't convert
# scalars to single-lane LLVM vectors.
with tvm.transform.PassContext(config={"tir.disable_assert": True}):
m = tvm.build(mod, [x, y, z], target="llvm")
@tvm.testing.requires_llvm
def test_raise_exception_during_codegen():
@T.prim_func
def threadpool_nested_parallel_loop(
A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i in T.parallel(4):
for j in T.parallel(4):
B[i, j] = A[i, j] * 2.0
with pytest.raises(tvm.TVMError) as e:
tvm.build({"llvm": tvm.IRModule.from_expr(threadpool_nested_parallel_loop)})
msg = str(e)
assert msg.find("Nested parallel loop is not supported") != -1
@tvm.testing.requires_llvm
def test_llvm_target_attributes():
"""Check that when LLVM codegen creates new functions, they get the same target
attributes as the original function.
"""
n = te.var()
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i], name="B")
C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], nparts=2)
s[C].parallel(xo)
target_llvm = "llvm -mcpu=skylake -mattr=+avx512f"
target = tvm.target.Target(target_llvm, host=target_llvm)
module = tvm.build(s, [A, B, C, n], target=target, name="test_func")
llvm_ir = module.get_source()
llvm_ir_lines = llvm_ir.split("\n")
attribute_definitions = dict()
attributes_with_target = dict()
functions_with_target = []
for line in llvm_ir_lines:
func_def = re.match(
"define.* @(?P<func_name>[^(]*)[(].* #(?P<attr_num>[0-9]+) (!.* |){$", line
)
if func_def:
functions_with_target.append(func_def.group("func_name"))
attributes_with_target[func_def.group("attr_num")] = True
continue
attr_def = re.match("attributes #(?P<attr_num>[0-9]+) = {(?P<attr_list>.*)}", line)
if attr_def:
attribute_definitions[attr_def.group("attr_num")] = attr_def.group("attr_list")
for k in list(attributes_with_target.keys()):
assert re.match('.*"target-cpu"="skylake".*', attribute_definitions[k])
assert re.match('.*"target-features"=".*[+]avx512f.*".*', attribute_definitions[k])
expected_functions = ["test_func", "test_func_compute_", "__tvm_parallel_lambda"]
for n in expected_functions:
assert n in functions_with_target
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_metal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import topi
import unittest
from tvm.contrib.nvcc import have_fp16, have_int8, have_bf16
from tvm.contrib import nvcc
import tvm.testing
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_gpu
@tvm.testing.requires_metal
def test_metal_inf_nan():
target = "metal"
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.device(target, 0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float16")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float16")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_metal
def test_metal_erf():
target = "metal"
def check_erf(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.device(target, 0)
check_erf(dev, 1, "float32")
check_erf(dev, 1, "float16")
if __name__ == "__main__":
test_metal_inf_nan()
test_metal_erf()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_opencl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import re
target = "opencl"
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_ternary_expression():
def check_if_then_else(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
true_value = tvm.tir.const(1, dtype=dtype)
false_value = tvm.tir.const(3, dtype=dtype)
max_lhs = tvm.tir.const(2, dtype=dtype)
max_rhs = tvm.tir.if_then_else(A[0] > 0, true_value, false_value)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
def check_select(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
true_value = tvm.tir.const(1, dtype=dtype)
false_value = tvm.tir.const(3, dtype=dtype)
max_lhs = tvm.tir.const(2, dtype=dtype)
max_rhs = tvm.tir.Select(A[0] > 0, true_value, false_value)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.device(target, 0)
check_if_then_else(dev, 1, "int8")
check_if_then_else(dev, 1, "uint8")
check_if_then_else(dev, 1, "int16")
check_if_then_else(dev, 1, "uint16")
check_select(dev, 1, "int8")
check_select(dev, 1, "uint8")
check_select(dev, 1, "int16")
check_select(dev, 1, "uint16")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_inf_nan():
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.device(target, 0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float64")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float64")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_max():
def check_max(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
max_lhs = A[0] + tvm.tir.const(1, dtype=dtype)
max_rhs = tvm.tir.const(0, dtype=dtype)
C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.device(target, 0)
check_max(dev, 1, "int8")
check_max(dev, 1, "uint8")
check_max(dev, 1, "int16")
check_max(dev, 1, "uint16")
check_max(dev, 1, "float32")
check_max(dev, 1, "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_erf():
def check_erf(dev, n, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, C], target)
source_str = fun.imported_modules[0].get_source()
matches = re.findall("erf", source_str)
error_matches = re.findall("erff", source_str)
assert len(matches) == 1 and len(error_matches) == 0
dev = tvm.device(target, 0)
check_erf(dev, 1, "float32")
check_erf(dev, 1, "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_type_casting():
def check_type_casting(ctx, n, dtype):
block_size = 4
C = te.compute(
(n,),
lambda i: tvm.tir.Select(
tvm.tir.all(
*[
i // block_size == tvm.tir.const(3, "int32"),
i % block_size == tvm.tir.const(3, "int32"),
]
),
tvm.tir.const(1, dtype),
tvm.tir.const(0, dtype),
),
name="C",
)
s = te.create_schedule(C.op)
(tx, vx) = s[C].split(s[C].op.axis[0], factor=block_size)
s[C].vectorize(vx)
thrx = te.thread_axis("threadIdx.x")
s[C].bind(tx, thrx)
fun = tvm.build(s, [C], target)
c = tvm.nd.empty((n,), dtype, ctx)
assembly = fun.imported_modules[0].get_source()
false_branch = "((float4)(0.000000e+00f, 0.000000e+00f, 0.000000e+00f, 0.000000e+00f))"
true_branch = "((float4)(1.000000e+00f, 1.000000e+00f, 1.000000e+00f, 1.000000e+00f))"
lcond = "(convert_uint4(((uint4)((((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3), (((int)get_local_id(0)) == 3)))))"
rcond = "(convert_uint4((((int4)((0)+(1*0), (0)+(1*1), (0)+(1*2), (0)+(1*3))) == ((int4)(3, 3, 3, 3)))))"
cond = "({} && {})".format(lcond, rcond)
select = "select({}, {}, {})".format(false_branch, true_branch, cond)
count = assembly.count(select)
assert count == 1
fun(c)
dev = tvm.device(target, 0)
check_type_casting(dev, 16, "float32")
if __name__ == "__main__":
test_opencl_ternary_expression()
test_opencl_inf_nan()
test_opencl_max()
test_opencl_erf()
test_opencl_type_casting()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
import unittest
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
@tvm.testing.requires_rocm
def test_rocm_cross_thread_reduction():
# based on the reduction tutorial
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
s[B].bind(xo, bx)
s[B].bind(xi, ty)
s[B].bind(s[B].op.reduce_axis[0], tx)
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
s[B].set_store_predicate(tx.var.equal(0))
frocm = tvm.build(s, [A, B], "rocm")
nn = 128
dev = tvm.rocm(0)
a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
frocm(a, b)
tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4)
@tvm.testing.requires_rocm
def test_rocm_inf_nan():
def check_inf_nan(dev, n, value, dtype):
A = te.placeholder((n,), name="A", dtype=dtype)
inf_value = tvm.tir.const(value, dtype=dtype)
C = te.compute((n,), lambda i: inf_value, name="C")
s = te.create_schedule(C.op)
s[C].bind(s[C].op.axis[0], tx)
fun = tvm.build(s, [A, C], "rocm")
a = tvm.nd.empty((n,), A.dtype, dev)
c = tvm.nd.empty((n,), A.dtype, dev)
# Only need to test compiling here
fun(a, c)
dev = tvm.rocm(0)
check_inf_nan(dev, 1, -float("inf"), "float32")
check_inf_nan(dev, 1, -float("inf"), "float64")
check_inf_nan(dev, 1, float("inf"), "float32")
check_inf_nan(dev, 1, float("inf"), "float64")
check_inf_nan(dev, 1, float("nan"), "float32")
check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_rocm
def test_rocm_reduction_binding():
k = te.reduce_axis((0, 32), "k")
A = te.placeholder((96, 32), name="A")
B = te.compute((96,), lambda m: te.sum(A[m, k], axis=k), name="B")
s = te.create_schedule(B.op)
s[B].reorder(B.op.reduce_axis[0], B.op.axis[0])
mo, _ = s[B].split(B.op.axis[0], 32)
s[B].bind(mo, bx)
@tvm.testing.requires_rocm
def test_rocm_copy():
def check_rocm(dtype, n):
A = te.placeholder((n,), name="A", dtype=dtype)
dev = tvm.rocm(0)
a_np = np.random.uniform(size=(n,)).astype(A.dtype)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
for _ in range(100):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
        perturb = np.random.uniform(low=0.5, high=1.5)
        check_rocm(dtype, int(perturb * (2**logN)))
@tvm.testing.requires_rocm
def test_rocm_vectorize_add():
num_thread = 8
def check_rocm(dtype, n, lanes):
A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, bx)
s[B].bind(xi, tx)
fun = tvm.build(s, [A, B], "rocm")
dev = tvm.rocm(0)
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
c = tvm.nd.empty((n,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
check_rocm("float32", 64, 2)
check_rocm("float16", 64, 2)
if __name__ == "__main__":
test_rocm_cross_thread_reduction()
test_rocm_inf_nan()
test_rocm_reduction_binding()
test_rocm_copy()
test_rocm_vectorize_add()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_static_init.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import ctypes
import numpy as np
def test_static_callback():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
cp = te.thread_axis((0, 1), "cop")
finit = tvm.tir.StringImm("TVMBackendRunOnce")
ib.scope_attr(cp, "coproc_uop_scope", finit)
with ib.for_range(0, n, "i", kind="parallel") as i:
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
f = tvm.driver.build(mod, target="llvm")
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
f(a)
np.testing.assert_equal(a.numpy(), np.ones(a.shape[0]))
def test_static_init():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
handle = tvm.tir.call_intrin("handle", "tir.tvm_static_handle")
ib.emit(tvm.tir.call_packed("test_static_callback", handle, Ab))
@tvm.register_func("test_static_callback")
def test_cb(sh, A):
assert isinstance(sh, ctypes.c_void_p)
return sh
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
f = tvm.driver.build(mod, target="llvm")
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
if __name__ == "__main__":
test_static_callback()
test_static_init()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_vm_basic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
def run_jit(fapi, check):
for target in ["llvm", "stackvm"]:
if not tvm.testing.device_enabled(target):
continue
f = tvm.driver.build(fapi, target=target)
s = f.get_source()
check(f)
def test_stack_vm_basic():
a = tvm.nd.array(np.zeros(10, dtype="float32"))
@tvm.register_func
def tvm_call_back_get_shape(shape0):
print(shape0)
assert shape0 == a.shape[0]
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), "float32")
stmt = tvm.tir.Evaluate(tvm.tir.call_packed("tvm_call_back_get_shape", Ab.shape[0]))
mod = tvm.IRModule.from_expr(
tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "print_shape")
)
run_jit(mod, lambda f: f(a))
@tvm.register_func
def tvm_stack_vm_print(*x):
print(x)
def test_stack_vm_loop():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n - 1, "i") as i:
A[i + 1] = A[i] + 1
ib.emit(tvm.tir.call_packed("tvm_stack_vm_print", i))
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
a = tvm.nd.array(np.zeros(10, dtype=dtype))
def check(f):
f(a)
np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
run_jit(mod, check)
def test_stack_vm_cond():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n - 1, "i") as i:
with ib.if_scope(tvm.tir.EQ(i, 4)):
A[i + 1] = A[i] + 1
with ib.else_scope():
A[i + 1] = A[i] + 2
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test"))
def check(f):
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
y = np.arange(a.shape[0]) * 2
y[5:] -= 1
np.testing.assert_equal(a.numpy(), y)
run_jit(mod, check)
def test_vm_parallel():
dtype = "int64"
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((n,), dtype)
i = te.size_var("i")
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n, "i", kind="parallel") as i:
A[i] = A[i] + 1
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test"))
def check(f):
a = tvm.nd.array(np.zeros(10, dtype=dtype))
f(a)
np.testing.assert_equal(a.numpy(), np.ones(a.shape[0]))
run_jit(mod, check)
if __name__ == "__main__":
test_vm_parallel()
test_stack_vm_loop()
test_stack_vm_basic()
test_stack_vm_cond()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_vulkan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from posixpath import split
import random
import re
import threading
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.topi.math import cast
dtype = tvm.testing.parameter("float32", "int32", "float16", "int8")
fuzz_seed = tvm.testing.parameter(range(25))
# Explicitly specify a target, as this test is looking at the
# generated shader code, and is not running on an actual device.
@tvm.testing.parametrize_targets(
" ".join(
[
"vulkan",
"-supports_int8=1",
"-supports_8bit_buffer=1",
"-supports_storage_buffer_storage_class=1",
"-supports_float16=1",
"-supports_16bit_buffer=1",
]
)
)
def test_vector_comparison(target, dtype):
n = (1024,)
A = te.placeholder(n, dtype=dtype, name="A")
B = te.compute(
A.shape,
lambda i: tvm.tir.Select(
A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
),
name="B",
)
s = te.create_schedule(B.op)
(bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
(tx, vx) = s[B].split(tx, factor=4)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
s[B].vectorize(vx)
f = tvm.build(s, [A, B], target)
# Verify we generate the boolx4 type declaration and the OpSelect
# v4{float,half,int} instruction
assembly = f.imported_modules[0].get_source()
matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
assert len(matches) == 1
matches = re.findall("OpSelect %v4.*", assembly)
assert len(matches) == 1
def test_array_copy(dev, dtype, fuzz_seed):
np.random.seed(fuzz_seed)
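    # Sample the array size log-uniformly over [1, 32768] so small and large
    # copies are exercised with comparable frequency.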
log_arr_size = np.random.uniform(low=np.log(1), high=np.log(32768))
arr_size = np.exp(log_arr_size).astype(int)
a_np = np.random.uniform(size=(arr_size,)).astype(dtype)
a = tvm.nd.empty((arr_size,), dtype, dev).copyfrom(a_np)
b_np = a.numpy()
tvm.testing.assert_allclose(a_np, b_np)
tvm.testing.assert_allclose(a_np, a.numpy())
@tvm.testing.exclude_targets("llvm")
def test_array_vectorize_add(target, dev, dtype):
arr_size = 64
lanes = 2
if "opencl" in target and dtype == "float16":
pytest.xfail("Opencl target does not support float16")
num_thread = 8
A = te.placeholder((arr_size,), name="A", dtype="%sx%d" % (dtype, lanes))
B = te.compute((arr_size,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B], target)
a = tvm.nd.empty((arr_size,), A.dtype, dev).copyfrom(np.random.uniform(size=(arr_size, lanes)))
c = tvm.nd.empty((arr_size,), B.dtype, dev)
fun(a, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
@tvm.testing.parametrize_targets("vulkan")
@pytest.mark.skip("Flaky, https://github.com/apache/tvm/issues/10779")
def test_vulkan_stress(target, dev):
"""
Launch a randomized test with multiple kernels per stream, multiple uses of
kernels per stream, over multiple threads.
"""
n = 1024
num_thread = 64
def run_stress():
def worker():
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
functions = [
(
lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
lambda a, b: 2 * a + 3 * b,
),
(lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
(lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
]
def build_f(f_ref):
(C_f, ref) = f_ref
C = C_f()
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(xi, te.thread_axis("threadIdx.x"))
fun = tvm.build(s, [A, B, C], target)
return (fun, ref)
fs = [
build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
]
a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
for ((f, _), c) in zip(fs, cs):
f(a, b, c)
for ((_, ref), c) in zip(fs, cs):
tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))
ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
for t in ts:
t.start()
for t in ts:
t.join()
run_stress()
@tvm.testing.exclude_targets("llvm")
def test_vulkan_bool_load(target, dev):
arr_size = 1024
target = tvm.target.Target(target)
if target.kind.name == "vulkan":
supports_int8_buffer = target.attrs.get("supports_int8", False) and target.attrs.get(
"supports_8bit_buffer", False
)
if not supports_int8_buffer:
pytest.xfail(
"Vulkan target does not support int8 buffer access, used to transfer booleans"
)
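    # Booleans are backed by int8 storage, so copying them into an int32
    # output exercises the 8-bit buffer access path gated above.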
def do_copy(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
B[tid] = cast(A[tid], "int32")
return ib.get()
A = te.placeholder((arr_size,), name="A", dtype="bool")
B = te.placeholder((arr_size,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_copy(ins[0], outs[0], arr_size),
name="bool_copy_ir",
dtype="int32",
)
s = te.create_schedule(B.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a_np = np.random.uniform(size=arr_size) > 0.5
b_np = np.zeros((arr_size,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
ref = a_np.astype(np.int32)
tvm.testing.assert_allclose(b.numpy(), ref)
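# Helper for the Relay tests below. The VM executor is used because the
# inputs have relay.Any() dimensions, which the graph executor cannot handle.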
def check_mod(target, dev, mod, x_np, res_np):
res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x_np).numpy()
tvm.testing.assert_allclose(res, res_np, atol=1e-5)
def test_sqrt(target, dev):
    # Three 32-bit push constants: any_dim, stride, stride
dtype = "float32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.sqrt(x))
x_np = np.random.uniform(size=(10,)).astype(dtype)
res_np = np.sqrt(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_argsort(target, dev):
    # One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.argsort(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.argsort(x_np)
check_mod(target, dev, mod, x_np, res_np)
def test_cumsum(target, dev):
    # One 64-bit and one 32-bit constant
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
mod["main"] = relay.Function([x], relay.cumsum(x))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.cumsum(x_np)
check_mod(target, dev, mod, x_np, res_np)
@tvm.testing.skip_if_wheel_test
def test_unique(target, dev):
dtype = "int32"
x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
mod = tvm.IRModule()
[unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
res_np = np.unique(x_np)
check_mod(target, dev, mod, x_np, res_np)
vulkan_parameter_impl = tvm.testing.parameter("push_constants", "ubo")
vulkan_parameter_dtype = tvm.testing.parameter("int32", "float32", "int64")
# Only run on vulkan because extremely large numbers of input
# parameters can crash cuda/llvm compiler.
@tvm.testing.parametrize_targets("vulkan -from_device=0")
def test_vulkan_constant_passing(target, dev, vulkan_parameter_impl, vulkan_parameter_dtype):
target = tvm.target.Target(target)
dtype = vulkan_parameter_dtype
if not target.attrs.get("supports_int64", False):
pytest.xfail("Vulkan target does not support Int64 variables")
# f_add has 3+num_int_params scalar parameters. The other three
# are length_n, stride1, and stride2.
if vulkan_parameter_impl == "push_constants":
# 4 params, 32 bytes. Within 128-byte spec-guaranteed size of
# push constants. Uses push constants.
num_int_params = 1
else:
        # More scalar params than fit in the reported push constant size.
        # Uses either push constants or UBO, depending on the device.
max_push_constants_size = int(target.attrs.get("max_push_constants_size", 128))
max_int_params_in_push = max_push_constants_size // 8 - 3
num_int_params = max_int_params_in_push + 1
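        # e.g. with the guaranteed 128-byte budget: 128 // 8 - 3 = 13, so
        # num_int_params = 14 and f_add takes 17 scalar arguments, i.e.
        # 136 bytes of int64 params, just past the guaranteed size.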
n = te.var("n")
scalars = [te.var("scale{}".format(i), dtype=dtype) for i in range(num_int_params)]
scalar_sum = scalars[0]
for s in scalars[1:]:
scalar_sum += s
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f_add = tvm.build(s, scalars + [A, B], target)
n = 1024
scalars = np.array([1 for _ in scalars]).astype(dtype)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f_add(*scalars, a, b)
tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())
def test_vulkan_while_if(target, dev):
target = tvm.target.Target(target)
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
iterations[0] = 0
B[0] = 0
# WhileNode's condition is re-evaluated every loop. The
# if_then_else block introduces additional labels/blocks that
# must be kept separate from the WhileNode's block.
loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
with ib.while_loop(loop_condition):
iterations[0] += 1
B[0] += iterations[0]
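        # With A[0] > 0 the loop runs 10 times, so B[0] = 1 + 2 + ... + 10 = 55;
        # otherwise it runs 20 times and B[0] = 210, matching the asserts below.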
return ib.get()
n = 1
dtype = "int32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype=dtype,
)
s = te.create_schedule(B.op)
# Point of failure would be here, at tvm.build.
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B], target)
a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [55])
a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), [210])
@tvm.testing.exclude_targets("llvm")
def test_vulkan_local_threadidx(target, dev):
# To access the thread index, the vulkan runtime accesses a global
# array of thread indices, storing the result in a local variable.
# In CUDA, these are the built-in threadIdx.x variables, which are
# globally accessible. In vulkan, these local variables must be
# defined inside a function, but are hoisted up to the function
# header to mimic the global CUDA semantics. Before this
# hoisting, this test could trigger spvValidate errors for
# potentially undeclared variables.
def do_compute(A, B, n):
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
# One single declaration of te.thread_axis.
tx = te.thread_axis("threadIdx.x")
with ib.for_range(0, 1):
# Used inside a for-loop scope, defines local thread_id
# variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 0] = A[tx + 0]
with ib.for_range(0, 1):
# Used in next scope. If local variable defined at point
# of use instead of function header, will fail spvValidate
# for access of out-of-scope local variable.
ib.scope_attr(tx, "thread_extent", 16)
B[tx + 16] = A[tx + 16]
return ib.get()
n = te.var("n")
A = te.placeholder((n,), name="A", dtype="int32")
B = te.placeholder((n,), name="B", dtype="int32")
B = te.extern(
A.shape,
[A],
lambda ins, outs: do_compute(ins[0], outs[0], n),
dtype="int32",
)
s = te.create_schedule(B.op)
# Expected failure occurs at build step.
func = tvm.build(s, [A, B], target)
n = 32
a_np = np.arange(n).astype(dtype=A.dtype)
b_np = np.zeros((n,), dtype="int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), a_np)
class TestVectorizedIndices:
load_type, store_type = tvm.testing.parameters(
# Load N values, write to N locations.
# Vectorized copy.
("ramp", "ramp"),
# Load 1 value, write to N locations.
# Scalar load, vectorized store.
#
# Most TVM operations (e.g. schedule[tensor].vectorize(axis)) have
# the broadcast outside of the index, but it is semantically okay
# for the broadcast to be inside the index, and it shows up with
# some optimizations.
("broadcast", "ramp"),
        # Load 1 value, write to 1 location.
        # Broadcasting on both sides should be equivalent to a scalar copy.
        ("broadcast", "broadcast"),
        # Load N values, write to 1 location.
        # Disabled as it would have unclear semantics.
        # ("ramp", "broadcast"),
)
indirect_indices = tvm.testing.parameter(True, False, ids=["reorder", "no_reorder"])
@tvm.testing.fixture
def ref_data(self, load_type, store_type, indirect_indices):
n = 4
index_map = {
"ramp": np.arange(n),
"broadcast": np.zeros(n, dtype="int32"),
}
a_np = np.random.randint(np.iinfo("int32").max, size=n).astype("int32")
b_np = np.zeros(shape=n, dtype=a_np.dtype)
reorder_np = np.arange(n, dtype="int32")[::-1]
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = reorder_np[load_index]
b_np[store_index] = a_np[load_index]
return a_np, reorder_np, b_np
@tvm.testing.fixture
def mod(self, target, load_type, store_type, indirect_indices):
target = tvm.target.Target(target)
n = 4
dtype = "int32"
A = te.placeholder((n,), dtype=dtype, name="A")
R = te.placeholder((n,), dtype=dtype, name="R")
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
A, R = map(ib.buffer_ptr, ins)
B = ib.buffer_ptr(outs[0])
if "gpu" in target.keys:
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
index_map = {
"ramp": tvm.tir.Ramp(0, 1, 4),
"broadcast": tvm.tir.Broadcast(0, 4),
}
load_index = index_map[load_type]
store_index = index_map[store_type]
if indirect_indices:
load_index = R[load_index]
B[store_index] = A[load_index]
return ib.get()
B = te.extern(A.shape, [A, R], do_compute, dtype="int32")
s = te.create_schedule(B.op)
return tvm.lower(s, [A, R, B])
def test_ramp_broadcast_index(self, target, dev, mod, ref_data):
f = tvm.build(mod, target=target)
a_np, reorder_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
r = tvm.nd.array(reorder_np, dev)
b = tvm.nd.array(np.zeros(shape=b_np.shape, dtype="int32"), dev)
f(a, r, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
@tvm.testing.parametrize_targets("vulkan -max_shared_memory_per_block=16384")
def test_shared_mem_alloc(target, dev):
alloc_nbytes = 16384 * 2
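    # Allocate more shared memory than the target's declared
    # max_shared_memory_per_block so the build below must be rejected.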
def do_compute(ins, outs):
ib = tvm.tir.ir_builder.create()
out = ib.buffer_ptr(outs[0])
ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)
array = ib.allocate("int32", (alloc_nbytes,), name="array", scope="shared")
array[0] = 0
out[0] = array[0]
return ib.get()
Out = te.extern(
shape=(1,),
inputs=[],
fcompute=do_compute,
dtype="int32",
)
s = te.create_schedule(Out.op)
# Codegen should raise error when allocating more memory than the
# target supports.
with pytest.raises(tvm.TVMError):
tvm.build(s, [Out], target)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_codegen_x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import platform
import pytest
import re
import textwrap
import tvm
import tvm.testing
from tvm import te
llvm_version = tvm.target.codegen.llvm_version_major()
machine = platform.machine()
if machine not in ["i386", "x86_64", "AMD64", "amd64"]:
pytest.skip(f"Requires x86_64/i386, but machine is {machine}", allow_module_level=True)
@tvm.testing.requires_llvm
@pytest.mark.skipif(llvm_version < 6, reason=f"Requires LLVM 6+, got {llvm_version}")
def test_fp16_to_fp32():
def fp16_to_fp32(target, width, match=None, not_match=None):
elements = 64
n = tvm.runtime.convert(elements)
A = te.placeholder((n, width), dtype="float16", name="A")
B = te.compute(A.shape, lambda *i: A(*i).astype("float32"), name="B")
s = te.create_schedule(B.op)
s[B].vectorize(s[B].op.axis[1])
f = tvm.build(s, [A, B], target)
assembly = f.get_source("asm").splitlines()
if match:
matches = [l for l in assembly if re.search(match, l)]
assert matches
if not_match:
not_matches = [l for l in assembly if re.search(not_match, l)]
assert not not_matches
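    # Widths straddling the native vector length (15/16/17 lanes for AVX-512,
    # 8/9 for AVX2) plus an odd large width (49) cover both full and partial
    # vector lowering of the conversion.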
fp16_to_fp32("llvm -mcpu=skylake-avx512", 15, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 16, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 17, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512", 49, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-avx512f", 49, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-f16c,-avx512f", 49, not_match="vcvtph2ps")
fp16_to_fp32("llvm -mcpu=core-avx2", 8, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm -mcpu=core-avx2", 9, match="vcvtph2ps.*mm")
fp16_to_fp32("llvm", 9, not_match="vcvtph2ps")
is_32bit = platform.architecture()[0] == "32bit"
@tvm.testing.requires_llvm
@pytest.mark.skipif(is_32bit, reason="Fails in CI due to architecture mismatch in JIT")
@pytest.mark.parametrize("feature_string", ["-sse2", "+sse2"])
def test_fp16_fp32_conversions(feature_string):
relay_model = textwrap.dedent(
"""
#[version = "0.0.5"]
def @main(%inp : Tensor[(3), float32], %cst : Tensor[(3), float32]) {
%1 = cast(%inp, dtype="float16");
%2 = cast(%cst, dtype="float16");
%3 = add(%1, %2);
%4 = cast(%3, dtype="float32");
%4
}
"""
)
ir_mod = tvm.parser.fromtext(relay_model)
arch = "i386" if machine == "i386" else "x86_64"
aot_factory = tvm.relay.build(
ir_mod,
params={"cst": np.array([1.0, 2.0, 3.0], dtype="float32")},
target=f"llvm --mtriple={arch} --mattr={feature_string}",
executor=tvm.relay.backend.Executor(
"aot", {"interface-api": "packed", "unpacked-api": False}
),
)
mod_name = aot_factory["list_module_names"]()[0]
executor = aot_factory[mod_name]
mod = executor(tvm.cpu(0))
inp = tvm.nd.array(np.array([1.1, 2.1, 3.1], dtype="float32"), device=tvm.cpu(0))
mod.get_function("set_input")(0, inp)
mod.get_function("run")()
out = mod.get_function("get_output")(0)
expected = np.array([2.1, 4.1, 6.1], dtype="float32")
    np.testing.assert_allclose(out.numpy(), expected, rtol=1e-3)
if __name__ == "__main__":
test_fp16_to_fp32()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_parser_mprofile.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests to verify Python interactions with Target Parsing
"""
import pytest
import tvm.testing
from tvm.target import Target
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m55")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_mve(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m7")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_dsp(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m3")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert not parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["llvm"]])
def test_target_parser_mprofile_mattr(cpu_target):
parsed_target = Target(f"{cpu_target} -mcpu=cortex-m55 -mattr=+nomve,+woof")
assert len(parsed_target.keys) == 2
assert parsed_target.keys[0] == "arm_cpu"
assert parsed_target.keys[1] == "cpu"
assert parsed_target.features
assert parsed_target.features.has_dsp
assert not parsed_target.features.has_mve
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pytest
import tvm
import tvm.testing
from tvm.target import Target, arm_cpu, bifrost, cuda, intel_graphics, mali, rocm, vta
@tvm.target.generic_func
def mygeneric(data):
# default generic function
return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
return data + 3
@mygeneric.register("cpu")
def cpu_func(data):
return data + 10
def test_all_targets_device_type_verify():
"""Consistency verification for all targets' device type"""
all_targets = [tvm.target.Target(t) for t in tvm.target.Target.list_kinds()]
for tgt in all_targets:
# skip targets with hooks or otherwise intended to be used with external codegen
relay_to_tir = tgt.get_kind_attr("RelayToTIR")
tir_to_runtime = tgt.get_kind_attr("TIRToRuntime")
is_external_codegen = tgt.get_kind_attr("is_external_codegen")
if relay_to_tir is not None or tir_to_runtime is not None or is_external_codegen:
continue
if tgt.kind.name not in tvm._ffi.runtime_ctypes.Device.STR2MASK:
raise KeyError("Cannot find target kind: %s in Device.STR2MASK" % tgt.kind.name)
assert (
tgt.get_target_device_type() == tvm._ffi.runtime_ctypes.Device.STR2MASK[tgt.kind.name]
)
def test_target_dispatch():
with tvm.target.cuda():
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
with tvm.target.rocm():
assert mygeneric(1) == 4
assert mygeneric.get_packed_func()(1) == 4
with tvm.target.Target("cuda"):
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
with tvm.target.arm_cpu():
assert mygeneric(1) == 11
assert mygeneric.get_packed_func()(1) == 11
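    # "metal" carries the generic "gpu" key, so it dispatches to cuda_func.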
with tvm.target.Target("metal"):
assert mygeneric(1) == 3
assert mygeneric.get_packed_func()(1) == 3
assert tvm.target.Target.current() is None
@tvm.target.override_native_generic_func("test_target_temp_strategy")
def target_generic(data):
# default generic function
return data + 1
@target_generic.register(["cuda", "gpu"])
def target_cuda_func(data):
return data + 2
def temp_target_cuda_func(data):
return data + 3
def test_target_temp_strategy():
class TempStrategy(object):
def __init__(self, name, target, fstrategy):
generic_fstrategy = tvm.target.get_native_generic_func(name)
self.target = target
self.name = name
self.origin_func = {}
with tvm.target.Target(target) as target_obj:
for tgt_key in target_obj.keys:
self.origin_func[tgt_key] = generic_fstrategy.get_packed_func()
generic_fstrategy.register(fstrategy, tgt_key, allow_override=True)
def __enter__(self):
return self
def __exit__(self, typ, value, traceback):
generic_fstrategy = tvm.target.get_native_generic_func(self.name)
with tvm.target.Target(self.target) as target_obj:
for tgt_key in target_obj.keys:
generic_fstrategy.register(
self.origin_func[tgt_key], tgt_key, allow_override=True
)
with tvm.target.Target("cuda"):
assert target_generic(1) == 3
# The strategy func change to temp_target_cuda_func.
with TempStrategy("test_target_temp_strategy", "cuda", temp_target_cuda_func):
with tvm.target.Target("cuda"):
assert target_generic(1) == 4
with tvm.target.Target("cuda"):
assert target_generic(1) == 3
def test_target_string_parse():
target = tvm.target.Target("cuda -model=unknown -libs=cublas,cudnn")
assert target.kind.name == "cuda"
assert target.model == "unknown"
assert set(target.keys) == set(["cuda", "gpu"])
assert set(target.libs) == set(["cublas", "cudnn"])
assert str(target) == str(tvm.target.cuda(options="-libs=cublas,cudnn"))
assert tvm.target.intel_graphics().device_name == "intel_graphics"
assert tvm.target.mali().device_name == "mali"
assert tvm.target.arm_cpu().device_name == "arm_cpu"
def test_target_string_with_spaces():
target = tvm.target.Target(
"vulkan -device_name='Name of GPU with spaces' -device_type=discrete"
)
assert target.attrs["device_name"] == "Name of GPU with spaces"
assert target.attrs["device_type"] == "discrete"
target = tvm.target.Target(str(target))
assert target.attrs["device_name"] == "Name of GPU with spaces"
assert target.attrs["device_type"] == "discrete"
def test_target_llvm_options():
target = tvm.target.Target("llvm -cl-opt='-unroll-threshold:uint=100,-unroll-count:uint=3'")
assert sorted(target.attrs["cl-opt"]) == sorted(
["-unroll-threshold:uint=100", "-unroll-count:uint=3"]
)
def test_target_create():
targets = [cuda(), rocm(), mali(), intel_graphics(), arm_cpu("rk3399"), vta(), bifrost()]
for tgt in targets:
assert tgt is not None
def test_target_config():
"""
Test that constructing a target from a dictionary works.
"""
target_config = {
"kind": "llvm",
"keys": ["arm_cpu", "cpu"],
"device": "arm_cpu",
"libs": ["cblas"],
"mfloat-abi": "hard",
"mattr": ["+neon", "-avx512f"],
}
# Convert config dictionary to json string.
target_config_str = json.dumps(target_config)
# Test both dictionary input and json string.
for config in [target_config, target_config_str]:
target = tvm.target.Target(config)
assert target.kind.name == "llvm"
assert all([key in target.keys for key in ["arm_cpu", "cpu"]])
assert target.device_name == "arm_cpu"
assert target.libs == ["cblas"]
assert target.attrs["mfloat-abi"] == "hard"
assert all([attr in target.attrs["mattr"] for attr in ["+neon", "-avx512f"]])
def test_config_map():
"""
Confirm that constructing a target with invalid
attributes fails as expected.
"""
target_config = {"kind": "llvm", "libs": {"a": "b", "c": "d"}}
with pytest.raises(ValueError):
tvm.target.Target(target_config)
def test_composite_target():
tgt = tvm.target.Target("composite --host=llvm --devices=cuda,opencl")
assert tgt.kind.name == "composite"
assert tgt.host.kind.name == "llvm"
assert len(tgt.attrs["devices"]) == 2
cuda_device, opencl_device = tgt.attrs["devices"]
assert cuda_device.kind.name == "cuda"
assert opencl_device.kind.name == "opencl"
def test_target_tag_0():
tgt = tvm.target.Target("nvidia/geforce-rtx-2080-ti")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_75"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 65536
def test_target_tag_1():
tgt = tvm.target.Target("nvidia/jetson-nano")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
def test_list_kinds():
targets = tvm.target.Target.list_kinds()
assert len(targets) != 0
assert "llvm" in targets
assert all(isinstance(target_name, str) for target_name in targets)
def test_target_host_tags():
tgt = tvm.target.Target("nvidia/jetson-nano", "nvidia/geforce-rtx-2080-ti")
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_75"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 65536
def test_target_host_tag_dict():
tgt = tvm.target.Target("nvidia/jetson-nano", {"kind": "llvm"})
assert tgt.kind.name == "cuda"
assert tgt.attrs["arch"] == "sm_53"
assert tgt.attrs["max_shared_memory_per_block"] == 49152
assert tgt.attrs["max_threads_per_block"] == 1024
assert tgt.attrs["thread_warp_size"] == 32
assert tgt.attrs["registers_per_block"] == 32768
assert tgt.host.kind.name == "llvm"
def test_target_host_single_dict():
tgt = tvm.target.Target({"kind": "llvm", "host": "nvidia/jetson-nano"})
assert tgt.kind.name == "llvm"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_single_string():
tgt = tvm.target.Target("cuda --host llvm")
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_host_single_string_with_tag():
tgt = tvm.target.Target("cuda --host nvidia/jetson-nano")
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_merge_0():
tgt = tvm.target.Target(tvm.target.Target("cuda --host nvidia/jetson-nano"), None)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_target_host_merge_1():
tgt = tvm.target.Target("cuda --host llvm")
tgt = tvm.target.Target(tgt, tgt.host)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_host_merge_2():
"""Test picking the same host is ok."""
tgt = tvm.target.Target(tvm.target.Target("cuda --host llvm"), tvm.target.Target("llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
def test_target_tvm_object():
"""Test creating Target by using TVM Objects"""
String = tvm.runtime.container.String
tgt = tvm.target.Target(target=String("cuda --host llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
tgt = tvm.target.Target(target=String("cuda"), host=String("llvm"))
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
@pytest.mark.skip(reason="Causing infinite loop because of pytest and handle issue")
def test_target_host_merge_3():
with pytest.raises(ValueError, match=r"target host has to be a string or dictionary."):
tvm.target.Target(tvm.target.Target("cuda --host llvm"), 12.34)
def test_target_with_host():
tgt = tvm.target.Target("cuda")
llvm = tvm.target.Target("llvm")
tgt = tgt.with_host(llvm)
assert tgt.kind.name == "cuda"
assert tgt.host.kind.name == "llvm"
cuda_host = tvm.target.Target("nvidia/jetson-nano")
tgt = tgt.with_host(cuda_host)
assert tgt.host.kind.name == "cuda"
assert tgt.host.attrs["arch"] == "sm_53"
assert tgt.host.attrs["max_shared_memory_per_block"] == 49152
assert tgt.host.attrs["max_threads_per_block"] == 1024
assert tgt.host.attrs["thread_warp_size"] == 32
assert tgt.host.attrs["registers_per_block"] == 32768
def test_canon_target_and_host_0():
target = None
host = None
target, host = Target.canon_target_and_host(target, host)
assert target is None
assert host is None
def test_canon_target_and_host_1():
target = None
host = "llvm"
with pytest.raises(AssertionError, match=r"Target host is not empty when target is empty."):
target, host = Target.canon_target_and_host(target, host)
def test_canon_target_and_host_2():
target = Target("cuda")
host = Target("llvm")
target, host = Target.canon_target_and_host(target, host)
assert target.kind.name == "cuda"
assert target.host.kind.name == "llvm"
def test_canon_target_and_host_3():
target = Target(target="cuda", host="llvm")
host = None
target, host = Target.canon_target_and_host(target, host)
assert target.kind.name == "cuda"
assert target.host.kind.name == "llvm"
assert host.kind.name == "llvm"
assert target.host == host
def test_canon_multi_target_and_host_0():
with pytest.raises(AssertionError):
Target.canon_multi_target_and_host(None)
def test_canon_multi_target_and_host_1():
raw_targets = Target.canon_multi_target_and_host({"kind": "llvm"})
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "llvm"
def test_canon_multi_target_and_host_2():
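    # The integer keys are DLPack device types: 1 == kDLCPU, 2 == kDLCUDA.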
raw_targets = Target.canon_multi_target_and_host({1: "llvm", 2: "cuda"})
assert len(raw_targets) == 2
assert raw_targets[0].kind.name == "llvm"
assert raw_targets[1].kind.name == "cuda"
def test_canon_multi_target_and_host_3():
raw_targets = Target.canon_multi_target_and_host(["llvm", "cuda"])
assert len(raw_targets) == 2
assert raw_targets[0].kind.name == "llvm"
assert raw_targets[1].kind.name == "cuda"
def test_canon_multi_target_and_host_4():
raw_targets = Target.canon_multi_target_and_host("llvm")
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "llvm"
def test_canon_multi_target_and_host_5():
raw_targets = Target.canon_multi_target_and_host("cuda", "llvm")
assert len(raw_targets) == 1
assert raw_targets[0].kind.name == "cuda"
assert raw_targets[0].host.kind.name == "llvm"
def test_canon_multi_target_and_host_6():
"""Test `canon_target_and_host` by using TVM Objects"""
cuda_device_type = tvm.device("cuda").device_type
target = {cuda_device_type: Target(target="cuda", host="llvm")}
host = None
raw_targets_1 = Target.canon_multi_target_and_host(target, host)
assert len(raw_targets_1) == 1
assert raw_targets_1[0].kind.name == "cuda"
assert raw_targets_1[0].host.kind.name == "llvm"
target = {cuda_device_type: Target(tvm.runtime.container.String("cuda"))}
host = Target(tvm.runtime.container.String("llvm"))
target = tvm.runtime.convert(target)
assert isinstance(target, tvm.ir.container.Map)
raw_targets_2 = Target.canon_multi_target_and_host(target, host)
assert len(raw_targets_2) == 1
assert raw_targets_2[0].kind.name == "cuda"
assert raw_targets_2[0].host.kind.name == "llvm"
def test_canon_target_map_and_host():
target_map = {"cuda": "cuda_module", "llvm": "cpu_module"}
target_map, host = Target.canon_target_map_and_host(target_map, "llvm")
assert host.kind.name == "llvm"
for t, v in target_map.items():
assert t.host.kind.name == "llvm"
if t.kind.name == "cuda":
assert v == "cuda_module"
elif t.kind.name == "llvm":
assert v == "cpu_module"
else:
assert False
def test_target_attr_bool_value():
target0 = Target("vulkan --supports_float16=True")
assert target0.attrs["supports_float16"] == 1
target1 = Target("vulkan --supports_float16=true")
assert target1.attrs["supports_float16"] == 1
target2 = Target("vulkan --supports_float16=False")
assert target2.attrs["supports_float16"] == 0
target3 = Target("vulkan --supports_float16=false")
assert target3.attrs["supports_float16"] == 0
def test_target_features():
target_no_features = Target("cuda")
assert target_no_features.features
assert not target_no_features.features.is_test
target_with_features = Target("test")
assert target_with_features.features.is_test
assert not target_with_features.features.is_missing
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_target_texture_codegen_opencl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import autotvm
from tvm import te
from tvm.topi import testing
from tvm.topi.utils import get_const_tuple, simplify
from tvm.topi import nn
def compute_plus_one_rank3(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k: X[i, j, k] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank3(X, Y):
s = te.create_schedule(Y.op)
# Xt = s.cache_read(X, "texture", [Y])
# Xt = s.cache_read(X, "global", [Y])
Xt = s.cache_read(X, "global.texture", [Y])
# copy to texture stage
x, y, c = s[Xt].op.axis
s[Xt].bind(x, te.thread_axis("blockIdx.x"))
s[Xt].bind(y, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(c)
# the compute stage
x, y, c = s[Y].op.axis
xo, yo, xi, yi = s[Y].tile(x, y, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(c)
return s
def compute_plus_one_rank5(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k, l, m: X[i, j, k, l, m] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank5(X, Y):
s = te.create_schedule(Y.op)
Xt = s.cache_read(X, "global.texture", [Y])
# copy to texture stage
a, b, c, d, e = s[Xt].op.axis
abc = s[Xt].fuse(a, b, c)
s[Xt].bind(abc, te.thread_axis("blockIdx.x"))
s[Xt].bind(d, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(e)
# the compute stage
a, b, c, d, e = s[Y].op.axis
abc = s[Y].fuse(a, b, c)
xo, yo, xi, yi = s[Y].tile(abc, d, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(e)
return s
def compute_matmul(shape):
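    # Both operands are packed as (blocks, K, lanes) with lanes == 4 in the
    # tests; the trailing axis maps to the four lanes of a texture element,
    # and C is the product of the unpacked (blocks * lanes, K) matrices.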
A = te.placeholder(shape, name="A", dtype="float32")
B = te.placeholder(shape, name="B", dtype="float32")
k = te.reduce_axis((0, shape[1]), name="k")
C = te.compute(
(shape[0] * shape[2], shape[0] * shape[2]),
lambda i, j: te.sum(
A[i // shape[2], k, i % shape[2]].astype("float32")
* B[j // shape[2], k, j % shape[2]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
def copy_to_texture(stage):
_io, _k, _ii = s[stage].op.axis
s[stage].vectorize(_ii)
s[stage].bind(_io, bx)
s[stage].bind(_k, tx)
copy_to_texture(At)
copy_to_texture(Bt)
# copy to global stage
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
# the compute stage
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_x, _y = s[Cl].op.axis
s[Cl].reorder(_k, _x, _y)
s[Cl].unroll(_x)
s[Cl].vectorize(_y)
if local:
s[Al].compute_at(s[Cl], _k)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _k)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_matmul_inner(shape):
A = te.placeholder(shape, name="A", dtype="float32")
B = te.placeholder(shape, name="B", dtype="float32")
k = te.reduce_axis((0, shape[1] * shape[2]), name="k")
# (M, K) x (N, K)
# (32, 256) x (32, 256)
# (32, 64, 4) x (32, 64, 4)
C = te.compute(
(shape[0], shape[0]),
lambda i, j: te.sum(
A[i, k // shape[2], k % shape[2]].astype("float32")
* B[j, k // shape[2], k % shape[2]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul_inner(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
def copy_to_texture(stage):
_i, _ko, _ki = s[stage].op.axis
s[stage].vectorize(_ki)
s[stage].bind(_i, bx)
s[stage].bind(_ko, tx)
copy_to_texture(At)
copy_to_texture(Bt)
# copy to global stage
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
# the compute stage
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_x, _y = s[Cl].op.axis
s[Cl].reorder(_x, _y, _k)
s[Cl].unroll(_x)
# TODO(csullivan): consider whether the below error is worth resolving
# s[Cl].vectorize(_y) # error
if local:
s[Al].compute_at(s[Cl], _x)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _x)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_matmul_vector_accumulator(shapeA, shapeB):
# A x B
    # (K//4, M, 4) x (K, N//4, 4) = (M, N)
# (32, 64, 4) x (128, 16, 4) = (64, 64)
A = te.placeholder(shapeA, name="A", dtype="float32")
B = te.placeholder(shapeB, name="B", dtype="float32")
k = te.reduce_axis((0, shapeB[0]), name="k")
C = te.compute(
(shapeA[1], shapeB[1] * shapeB[2]),
lambda i, j: te.sum(
A[k // shapeA[-1], i, k % shapeA[-1]].astype("float32")
* B[k, j // shapeB[-1], j % shapeB[-1]].astype("float32"),
axis=[k],
),
name="Compute_MatMul",
)
return A, B, C
def schedule_matmul_vector_accumulator(A, B, C, local=False):
s = te.create_schedule(C.op)
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
if local:
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
_y, _x, _v = s[stage].op.axis
# TODO(csullivan): removing this vectorize results in numerical errors, autovectorize
s[stage].vectorize(_v)
s[stage].bind(_y, te.thread_axis("blockIdx.x"))
s[stage].bind(_x, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
# copy to global stage
_i, _j = s[C].op.axis
xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
s[C].unroll(xi)
s[C].vectorize(yi)
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(yo, te.thread_axis("threadIdx.x"))
# the compute stage
s[Cl].compute_at(s[C], yo)
(_k,) = Cl.op.reduce_axis
_a, _b = s[Cl].op.axis
_ko, _ki = s[Cl].split(_k, factor=4)
s[Cl].reorder(_ko, _a, _ki, _b)
s[Cl].unroll(_ki)
s[Cl].unroll(_a)
s[Cl].vectorize(_b)
if local:
s[Al].compute_at(s[Cl], _a)
_aa, _ka, _ba = s[Al].op.axis
# TODO(csullivan)[BEFORE PR]: removing this vectorize command causes a crash. This needs to be autovectorized.
s[Al].vectorize(_ba)
s[Bl].compute_at(s[Cl], _ko)
_ab, _kb, _bb = s[Bl].op.axis
s[Bl].vectorize(_bb)
s[Bl].unroll(_ab)
return s
def compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
# conv2d( [N, C, H, W, c] , [1, 1, C, K, k]
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
c = te.reduce_axis((0, input_shape[1]), name="C")
c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
kh = te.reduce_axis((0, filter_shape[0]), name="kh")
kw = te.reduce_axis((0, filter_shape[1]), name="kw")
conv = te.compute(
(input_shape[0], filter_shape[-2], input_shape[2], input_shape[3], filter_shape[-1]),
lambda n, ko, i, j, ki: te.sum(
data[n, c, i, j, c4].astype("float32")
* filt[kh, kw, c * input_shape[-1] + c4, ko, ki].astype("float32"),
axis=[kh, kw, c, c4],
),
# name="Compute_conv2d_1x1_NCHWc_RSCKk",
name="conv2d_1x1",
)
return data, filt, conv
def schedule_conv2d_1x1_NCHWc_RSCKk(data, filt, conv):
# inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
# outputs:
s = te.create_schedule(conv.op)
A, B, C = data, filt, conv
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
_n, _ko, _h, _w, _ki = s[C].op.axis
s[C].vectorize(_ki)
s[C].bind(_n, te.thread_axis("blockIdx.x"))
s[C].bind(_ko, te.thread_axis("threadIdx.x"))
s[Cl].compute_at(s[C], _w)
_nl, _kol, _hl, _wl, _kil = s[Cl].op.axis
_khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
_clo, _cli = s[Cl].split(_cl, factor=4)
s[Cl].reorder(_clo, _cli, _cl4, _kil)
s[Cl].unroll(_cli)
s[Cl].unroll(_cl4)
s[Cl].vectorize(_kil)
s[Al].compute_at(s[Cl], _cli)
s[Al].vectorize(s[Al].op.axis[-1])
s[Bl].compute_at(s[Cl], _kwl)
s[Bl].vectorize(s[Bl].op.axis[-1])
return s
def compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
# input_shape = [W, C, H, N, c] -> [W, C, H*N, c]
# filter_shape = [C, R, S, K, k] -> [C, R*S*K, k]
# output_shape: [WK, HN, k] -> [W, K, H, N, k]
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
packed_data = te.compute(
(input_shape[0], input_shape[1], input_shape[2] * input_shape[3], input_shape[4]),
lambda i, j, k, l: data[i, j, k // input_shape[3], k % input_shape[3], l],
name="packed_data",
)
# Logical transformation of Nd -> 3d tensor
# CRSKk -> C|RSK|k
# r = rsk // SK
# sk = rsk % SK
# s = sk // K == (rsk % SK) // K == (rsk // K) % S
# k = sk % K == (rsk % SK) % K == rsk % K
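    # e.g. with R = S = 2, K = 3 (SK = 6): rsk = 10 -> r = 10 // 6 = 1,
    # sk = 10 % 6 = 4, s = 4 // 3 = 1, k = 4 % 3 = 1; the alternate forms
    # agree: (10 // 3) % 2 == 1 and 10 % 3 == 1.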
packed_filter = te.compute(
(filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3], filter_shape[4]),
lambda i, j, k: filt[
i,
j // (filter_shape[3] * filter_shape[2]),
(j // filter_shape[3]) % filter_shape[2],
j % filter_shape[3],
k,
],
name="packed_filter",
)
c = te.reduce_axis((0, input_shape[1]), name="C")
c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
r = te.reduce_axis((0, filter_shape[1]), name="r")
s = te.reduce_axis((0, filter_shape[2]), name="s")
conv = te.compute(
(input_shape[0], filter_shape[3], input_shape[2], input_shape[3], filter_shape[4]),
lambda w, ko, h, n, ki: te.sum(
packed_data[w, c, h * input_shape[3] + n, c4].astype("float32")
* packed_filter[
c * input_shape[-1] + c4, ((r * filter_shape[2]) + s) * filter_shape[3] + ko, ki
].astype("float32"),
axis=[r, s, c, c4],
),
name="conv2d_1x1",
)
return data, filt, packed_data, packed_filter, conv
def schedule_conv2d_1x1_WCHNc_CRSKk(data, filt, packed_data, packed_filter, conv):
# data: [W, C, H*N, c]
# filter: [C, R*S*K, k]
# output: [W, K, H, N, k]
# conv2d( [N, C, H, W, c] , [1, 1, C, K, k]
# inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
# data: (56, 128//4, 56*1, 4) = (56, 32, 56, 4)
# filt: (128, 1*1*128//4, 4) = (128, 32, 4)
# conv: (56, 32, 56, 1, 4)
s = te.create_schedule(conv.op)
cfg = autotvm.get_config()
s[packed_data].compute_inline()
s[packed_filter].compute_inline()
A, B, C = packed_data, packed_filter, conv
At = s.cache_read(A, "global.texture", [C])
Bt = s.cache_read(B, "global.texture", [C])
Al = s.cache_read(At, "local", [C])
Bl = s.cache_read(Bt, "local", [C])
Cl = s.cache_write(C, "local")
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(At)
copy_to_texture(Bt)
_w, _ko, _h, _n, _ki = s[C].op.axis
kernel_scope, _n = s[C].split(_n, nparts=1)
cfg.define_split("tile_f", _ko, num_outputs=4)
cfg.define_split("tile_w", _w, num_outputs=4)
cfg.define_split("tile_h", _h, num_outputs=4)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
bk, vk, tk, ki = cfg["tile_f"].apply(s, C, _ko)
bw, vw, tw, wi = cfg["tile_w"].apply(s, C, _w)
bh, vh, th, hi = cfg["tile_h"].apply(s, C, _h)
s[C].reorder(bh, _n, vh, th, hi)
bhn = s[C].fuse(bh, _n)
s[C].bind(bk, te.thread_axis("blockIdx.z"))
s[C].bind(bhn, te.thread_axis("blockIdx.y"))
s[C].bind(bw, te.thread_axis("blockIdx.x"))
s[C].bind(vk, te.thread_axis("vthread"))
s[C].bind(vh, te.thread_axis("vthread"))
s[C].bind(vw, te.thread_axis("vthread"))
s[C].bind(tk, te.thread_axis("threadIdx.z"))
s[C].bind(th, te.thread_axis("threadIdx.y"))
s[C].bind(tw, te.thread_axis("threadIdx.x"))
s[C].reorder(bw, bk, bhn, vw, vk, vh, tw, tk, th, ki, hi, wi, _ki)
s[C].vectorize(_ki)
# TODO(csullivan): Try uneven workgroup split
# _wo, _wi = s[C].split(_w, factor=4)
# #_hno, _hni = s[C].split(_hn, factor=8)
# #s[C].reorder(_wo, _wi, _ko, _hno, _hni, _ki)
# s[C].reorder(_wo, _ko, _hn, _ki, _wi)
# s[C].unroll(_wi)
# # mace:
# # const int out_ch_blk = get_global_id(0);
# # const int out_w_blk = get_global_id(1);
# # const int out_hb = get_global_id(2);
# bx = te.thread_axis("blockIdx.x")
# by = te.thread_axis("blockIdx.y")
# bz = te.thread_axis("blockIdx.z")
# s[C].bind(_ko, bx)
# s[C].bind(_wo, by)
# s[C].bind(_hn, bz)
# s[Cl].compute_at(s[C], _hn)
s[Cl].compute_at(s[C], th)
_wl, _kol, _hl, _nl, _kil = s[Cl].op.axis
_khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
cfg.define_split("tile_c", _cl, num_outputs=2)
cfg.define_split("tile_kh", _khl, num_outputs=2)
cfg.define_split("tile_kw", _kwl, num_outputs=2)
_clo, _cli = cfg["tile_c"].apply(s, Cl, _cl)
_khlo, _khli = cfg["tile_kh"].apply(s, Cl, _khl)
_kwlo, _kwli = cfg["tile_kw"].apply(s, Cl, _kwl)
# s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli, _kol, _hl, _nl, _kil, _wl)
# s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli)
# s[Cl].reorder(_cl, _cl4, _kil, _wl)
s[Cl].unroll(_cl4)
s[Cl].unroll(_wl)
s[Cl].vectorize(_kil)
_wla, _cla, _hnla, _cl4a = s[Al].op.axis
s[Al].compute_at(s[Cl], _cli)
s[Al].vectorize(_cl4a)
s[Al].unroll(_wla)
_clb, _rskolb, _kilb = s[Bl].op.axis
s[Bl].compute_at(s[Cl], _cli)
s[Bl].vectorize(_kilb)
s[Bl].unroll(_clb)
s[C].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
WO, K, HO, N, K4 = get_const_tuple(C.shape)
RSC, _, _ = get_const_tuple(B.shape)
cfg.add_flop(2 * N * K * K4 * HO * WO * RSC)
return s
def compute_conv2d_NCHWc_KCRSk(Input, Filter, stride, padding, dilation, out_dtype=None):
"""Convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
# compute graph
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
rcc = te.reduce_axis((0, in_channel_chunk), name="rc")
rcb = te.reduce_axis((0, in_channel_block), name="rc")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
# NCHWc x KCRSk
# texture: NCH|W|c
# texture: K|CRS|k
# c = crs//RS
# rs = crs % RS
    # r = rs // S == (crs // S) % R
    # s = rs % S == crs % S
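    # e.g. with R = S = 3 (RS = 9): crs = 14 -> c = 14 // 9 = 1, rs = 5,
    # r = 5 // 3 = 1, s = 5 % 3 = 2; equivalently (14 // 3) % 3 == 1 and
    # 14 % 3 == 2.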
Filter = te.compute(
(num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
lambda ffc, crs, ffb: Filter[
ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
],
name="packed_filter",
)
return te.compute(
(batch, num_filter_chunk, out_height, out_width, num_filter_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
temp[
nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb
].astype(out_dtype)
* Filter[
ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb
].astype(out_dtype),
axis=[rcc, rcb, ry, rx],
),
tag="conv2d_nchwc_kcrsk_texture",
)
def schedule_conv2d_NCHWc_KCRSk(cfg, s, conv):
"""schedule optimized for batch size = 1"""
##### space definition begin #####
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
# tile and bind spatial axes
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, fc, y, x, fb = s[OL].op.axis
rcc, rcb, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
# TODO(csullivan): check position of rcb
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[OL].vectorize(fb)
s[OL].unroll(rcb)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
_, ICKHKW, _ = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_conv2d_NCHWc_KCRSk_acc32(Input, Filter, stride, padding, dilation, out_dtype=None):
"""Convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
# compute graph
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
    rcc = te.reduce_axis((0, in_channel_chunk), name="rcc")
    rcb = te.reduce_axis((0, in_channel_block), name="rcb")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
# NCHWc x KCRSk
# texture: NCH|W|c
# texture: K|CRS|k
# c = crs//RS
# rs = crs % RS
    # r = rs // S == (crs // S) % R
    # s = rs % S == crs % S
Filter = te.compute(
(num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
lambda ffc, crs, ffb: Filter[
ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
],
name="packed_filter",
)
conv = te.compute(
(batch, num_filter_chunk, out_height, out_width, num_filter_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
* Filter[ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb]
).astype(out_dtype),
axis=[rcc, rcb, ry, rx],
),
tag="conv2d_nchwc_kcrsk_texture",
)
output = te.compute(conv.shape, lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype("float32"))
return output
def schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
"""schedule optimized for batch size = 1"""
conv = output.op.input_tensors[0]
##### space definition begin #####
n, fc, y, x, fb = s[conv].op.axis
rcc, rcb, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rcc", rcc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
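    # Stage a copy into texture memory: flatten all but the innermost
    # (vector) axis, distribute the copy over 32-thread blocks, and
    # vectorize the last axis so each thread moves a full texel.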
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
# tile and bind spatial axes
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, fc, y, x, fb = s[OL].op.axis
rcc, rcb, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
# TODO(csullivan): check position of rcb
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
s[OL].vectorize(fb)
s[OL].unroll(rcb)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
_, ICKHKW, _ = get_const_tuple(kernel.shape)
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
Input, Filter, stride, padding, dilation, out_dtype=None
):
"""Depthwise convolution operator in NCHWc layout."""
if out_dtype is None:
out_dtype = Input.dtype
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
batch, channel_chunk, in_height, in_width, channel_block = Input.shape
_, channel_multiplier, kernel_h, kernel_w, _ = Filter.shape
# compute the output shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel_chunk = simplify(channel_chunk * channel_multiplier)
out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
# compute graph
pad_before = [0, 0, pad_top, pad_left, 0]
pad_after = [0, 0, pad_down, pad_right, 0]
temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
ry = te.reduce_axis((0, kernel_h), name="ry")
rx = te.reduce_axis((0, kernel_w), name="rx")
# NCHWc x CMRSc = [N,(C//4)M,OH,OW, 4c]
# NCHWc x CMRS
# texture: NCH|W|c
# texture: C|MRS|c
# output: N
# m = mrs//RS
# rs = mrs % RS
    # r = rs // S == (mrs // S) % R
    # s = rs % S == mrs % S
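    # e.g. with R = S = 3, mrs = 17 unpacks to m = 17 // 9 = 1,
    # rs = 17 % 9 = 8, r = 8 // 3 = 2, s = 8 % 3 = 2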
Filter = te.compute(
(channel_chunk, channel_multiplier * kernel_h * kernel_w, channel_block),
lambda ffc, mrs, ffb: Filter[
ffc, mrs // (kernel_h * kernel_w), (mrs // kernel_w) % kernel_h, mrs % kernel_w, ffb
],
name="packed_filter",
)
conv = te.compute(
(batch, out_channel_chunk, out_height, out_width, channel_block),
lambda nn, ffc, yy, xx, ffb: te.sum(
(
temp[
nn,
ffc // channel_multiplier,
yy * stride_h + ry * dilation_h,
xx * stride_w + rx * dilation_w,
ffb,
]
* Filter[
ffc // channel_multiplier,
((ffc % channel_multiplier) * kernel_h + ry) * kernel_w + rx,
ffb,
]
).astype(out_dtype),
axis=[ry, rx],
),
tag="depthwise_conv2d_nchwc_kcrsk_texture",
)
return te.compute(
conv.shape, lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype("float32")
)
def schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
"""schedule optimized for batch size = 1"""
conv = output.op.input_tensors[0]
##### space definition begin #####
n, fc, y, x, fb = s[conv].op.axis
ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_fc", fc, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
pad_data, flattened_kernel = s[conv].op.input_tensors
kernel = s[flattened_kernel].op.input_tensors[0]
s[flattened_kernel].compute_inline()
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
s[kernel].compute_inline()
kernel = flattened_kernel
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, "local")
else:
output = s.outputs[0].output(0)
s[conv].set_scope("local")
OL = conv
# create cache stage
AT = s.cache_read(pad_data, "global.texture", [OL])
WT = s.cache_read(kernel, "global.texture", [OL])
def copy_to_texture(stage):
axes = s[stage].op.axis
fused = s[stage].fuse(*axes[:-1])
block, thread = s[stage].split(fused, factor=32)
s[stage].vectorize(axes[-1])
s[stage].bind(block, te.thread_axis("blockIdx.x"))
s[stage].bind(thread, te.thread_axis("threadIdx.x"))
copy_to_texture(AT)
copy_to_texture(WT)
AA = s.cache_read(AT, "shared", [OL])
WW = s.cache_read(WT, "shared", [OL])
# tile and bind spatial axes
n, fc, y, x, fb = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, te.thread_axis("blockIdx.z"))
s[output].bind(by, te.thread_axis("blockIdx.y"))
s[output].bind(bx, te.thread_axis("blockIdx.x"))
s[output].bind(vf, te.thread_axis("vthread"))
s[output].bind(vy, te.thread_axis("vthread"))
s[output].bind(vx, te.thread_axis("vthread"))
s[output].bind(tf, te.thread_axis("threadIdx.z"))
s[output].bind(ty, te.thread_axis("threadIdx.y"))
s[output].bind(tx, te.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
s[output].vectorize(fb)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, fc, y, x, fb = s[OL].op.axis
ry, rx = s[OL].op.reduce_axis
ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
s[OL].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
s[OL].vectorize(fb)
# s[OL].unroll()
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
if load == WW:
n, fyx, v = s[load].op.axis
fused = s[load].fuse(n, fyx)
else:
n, f, y, x, v = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, te.thread_axis("threadIdx.z"))
s[load].bind(ty, te.thread_axis("threadIdx.y"))
s[load].bind(tx, te.thread_axis("threadIdx.x"))
s[load].vectorize(v)
# unroll
s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
ICC, MKHKW, ICB = get_const_tuple(kernel.shape)
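    # The packed depthwise filter flattens (multiplier, KH, KW) into MKHKW,
    # so M is recovered from the output/input channel ratio and KHKW = MKHKW // M.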
M = (OCC * OCB) // (ICC * ICB)
KHKW = MKHKW // M
if isinstance(N, int):
cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
def scheduler(compute, schedule, *args, **kwargs):
placeholders = compute(*args)
s = schedule(*placeholders, **kwargs)
return s, placeholders
def conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
placeholders = compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape)
s = schedule_conv2d_1x1_NCHWc_RSCKk(*placeholders)
return s, placeholders
def conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
placeholders = compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape)
s = schedule_conv2d_1x1_WCHNc_CRSKk(*placeholders)
return s, (placeholders[0], placeholders[1], placeholders[-1])
def conv2d_NCHWc_KCRSk(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
conv = compute_conv2d_NCHWc_KCRSk(data, filt, [1, 1], [0, 0], [1, 1], "float32")
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [conv]])
schedule_conv2d_NCHWc_KCRSk(cfg, s, conv)
return s, (data, filt, conv)
def conv2d_NCHWc_KCRSk_fp32_acc(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
output = compute_conv2d_NCHWc_KCRSk_acc32(data, filt, [1, 1], [0, 0], [1, 1], "float32")
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [output]])
schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)
return s, (data, filt, output)
def depthwise_conv2d_NCHWc_KCRSk_acc32(input_shape, filter_shape):
data = te.placeholder(input_shape, name="data", dtype="float32")
filt = te.placeholder(filter_shape, name="filter", dtype="float32")
output = compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
data, filt, [1, 1], [0, 0], [1, 1], "float32"
)
cfg = autotvm.get_config()
s = te.create_schedule([x.op for x in [output]])
schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output)
return s, (data, filt, output)
def ref_convolution(data, kernel, stride, pad):
import mxnet as mx
groups = 1
kernel_size = (kernel.shape[2], kernel.shape[3])
num_filter = kernel.shape[0]
ref_res = mx.nd.Convolution(
data=mx.nd.array(data),
weight=mx.nd.array(kernel),
bias=None,
no_bias=True,
kernel=kernel_size,
stride=stride,
pad=pad,
num_filter=num_filter,
num_group=groups,
)
return ref_res.asnumpy()
def ref_depthwise_convolution(data, kernel, stride, pad):
import mxnet as mx
groups = kernel.shape[0]
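    # Setting num_group to the number of input channels makes mx.nd.Convolution
    # compute a depthwise convolution (one filter group per input channel).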
kernel_size = (kernel.shape[2], kernel.shape[3])
num_filter = kernel.shape[0]
multiplier = kernel.shape[1]
ref_res = mx.nd.Convolution(
data=mx.nd.array(data),
weight=mx.nd.array(kernel),
bias=None,
no_bias=True,
kernel=kernel_size,
stride=stride,
pad=pad,
num_filter=num_filter,
num_group=groups,
)
return ref_res.asnumpy()
def validate(workload, target, dev, input_shapes, *args, **kwargs):
s, placeholders = workload(*input_shapes, *args, **kwargs)
func = tvm.driver.build(s, [*placeholders], target=target, name="TestFunction")
args_tvm = []
args_np = []
for var in placeholders[:-1]:
var_np = np.random.uniform(size=[i.value for i in var.shape]).astype(var.dtype)
args_np.append(var_np)
args_tvm.append(tvm.nd.array(var_np, dev))
args_tvm.append(
tvm.nd.array(
np.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype), dev
)
)
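    # The last placeholder is the output: pass a zero-filled buffer that the
    # compiled function writes in place.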
func(*args_tvm)
if "plus_one" in workload.__name__:
np_result = args_np[0] + 1.0
elif "matmul" in workload.__name__:
if "inner" in workload.__name__:
np_result = np.matmul(
args_np[0].reshape(32, 256), args_np[1].reshape(32, 256).transpose(1, 0)
)
elif "accum" in workload.__name__:
np_result = np.matmul(
args_np[0].transpose((1, 0, 2)).reshape(64, 128), args_np[1].reshape(128, 64)
)
else:
np_result = np.matmul(
args_np[0].transpose((0, 2, 1)).reshape(128, 64),
args_np[1].transpose(1, 0, 2).reshape(64, 128),
)
elif "conv2d_1x1_NCHWc_RSCKk" in workload.__name__:
vec_length = args_np[1].shape[-1]
# nchwc -> nchw
args_np[0] = (
args_np[0]
.transpose((0, 1, 4, 2, 3))
.reshape(
args_np[0].shape[0],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[3],
)
)
# rsckk -> rsck -> kcrs
args_np[1] = (
args_np[1]
.reshape(
args_np[1].shape[0],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3] * args_np[1].shape[4],
)
.transpose((3, 2, 0, 1))
)
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
# nkhw -> nkhwk
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(0, 1, 3, 4, 2)
elif "conv2d_1x1_WCHNc_CRSKk" in workload.__name__:
vec_length = args_np[1].shape[-1]
# wchnc -> nchw
args_np[0] = (
args_np[0]
.transpose((3, 1, 4, 2, 0))
.reshape(
args_np[0].shape[3],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[0],
)
)
# crskk -> crsk -> kcrs
args_np[1] = (
args_np[1]
.reshape(
args_np[1].shape[0],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3] * args_np[1].shape[4],
)
.transpose((3, 0, 1, 2))
)
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
# nkhw -> nkkhw -> wkhnk
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(4, 1, 3, 0, 2)
elif "NCHW_KCRS" in workload.__name__:
np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
elif "NCHWc_KCRSk" in workload.__name__:
vec_length = args_np[1].shape[-1]
# nchwc -> nchw
args_np[0] = (
args_np[0]
.transpose((0, 1, 4, 2, 3))
.reshape(
args_np[0].shape[0],
args_np[0].shape[1] * args_np[0].shape[-1],
args_np[0].shape[2],
args_np[0].shape[3],
)
)
# kcrsk/cmrsc -> kcrs/cmrs
args_np[1] = (
args_np[1]
.transpose((0, 4, 1, 2, 3))
.reshape(
args_np[1].shape[0] * args_np[1].shape[4],
args_np[1].shape[1],
args_np[1].shape[2],
args_np[1].shape[3],
)
)
if "depthwise" in workload.__name__:
# np_result = testing.depthwise_conv2d_python_nchw(args_np[0], args_np[1], 1, "VALID")
np_result = ref_depthwise_convolution(args_np[0], args_np[1], [], [])
else:
# np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
np_result = ref_convolution(args_np[0], args_np[1], [], [])
# nkhw -> nkhwk
np_result = np_result.reshape(
np_result.shape[0],
np_result.shape[1] // vec_length,
vec_length,
np_result.shape[2],
np_result.shape[3],
).transpose(0, 1, 3, 4, 2)
    np.testing.assert_allclose(args_tvm[-1].numpy(), np_result, rtol=1e-2, atol=1e-2)
class BaseSingleShapeValidator:
@tvm.testing.parametrize_targets("opencl")
def test_unary(self, test_func, input_shape, target, dev):
validate(test_func, target, dev, [input_shape])
class TestPlusOneRank3(BaseSingleShapeValidator):
input_shape = tvm.testing.parameter((32, 32, 4))
def plus_one(input_shape):
return scheduler(compute_plus_one_rank3, schedule_plus_one_rank3, input_shape)
test_func = tvm.testing.parameter(plus_one)
class TestPlusOneRank5(BaseSingleShapeValidator):
input_shape = tvm.testing.parameter((32, 2, 4, 4, 4))
def plus_one(input_shape):
return scheduler(compute_plus_one_rank5, schedule_plus_one_rank5, input_shape)
test_func = tvm.testing.parameter(plus_one)
class TestMatmul:
input_shape = tvm.testing.parameter((32, 64, 4))
local = tvm.testing.parameter(False, True)
def matmul(input_shape, local):
return scheduler(compute_matmul, schedule_matmul, input_shape, local=local)
def matmul_inner(input_shape, local):
return scheduler(compute_matmul_inner, schedule_matmul_inner, input_shape, local=local)
test_func = tvm.testing.parameter(matmul, matmul_inner)
@tvm.testing.parametrize_targets("opencl")
def test_matmul(self, test_func, input_shape, local, target, dev):
validate(test_func, target, dev, [input_shape], local=local)
class TestMatmulVectorAccumulator:
shapeA = tvm.testing.parameter((32, 64, 4))
shapeB = tvm.testing.parameter((128, 16, 4))
local = tvm.testing.parameter(False, True)
def matmul_vector_accumulator(shapeA, shapeB, local):
return scheduler(
compute_matmul_vector_accumulator,
schedule_matmul_vector_accumulator,
shapeA,
shapeB,
local=local,
)
test_func = tvm.testing.parameter(matmul_vector_accumulator)
@tvm.testing.parametrize_targets("opencl")
def test_matmul_vec_acc(self, test_func, shapeA, shapeB, local, target, dev):
validate(test_func, target, dev, [shapeA, shapeB], local=local)
class BaseConv2DValidator:
@tvm.testing.parametrize_targets("opencl")
def test_conv2d(self, test_func, input_shapes, target, dev):
validate(test_func, target, dev, input_shapes)
class TestConv2dNCHWcRSCKk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(1, 32, 56, 56, 4), (1, 1, 128, 32, 4)])
test_func = tvm.testing.parameter(conv2d_1x1_NCHWc_RSCKk)
class TestConv2dWCHNcCRSKk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(56, 32, 56, 1, 4), (128, 1, 1, 32, 4)])
test_func = tvm.testing.parameter(conv2d_1x1_WCHNc_CRSKk)
class TestConv2dNCHWcKCRSk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter(
[(1, 32, 56, 56, 4), (32, 128, 1, 1, 4)], [(1, 32, 112, 112, 4), (32, 128, 3, 3, 4)]
)
test_func = tvm.testing.parameter(conv2d_NCHWc_KCRSk, conv2d_NCHWc_KCRSk_fp32_acc)
class TestDepthwiseConv2dNCHWcKCRSk(BaseConv2DValidator):
input_shapes = tvm.testing.parameter([(1, 24, 257, 257, 4), (24, 1, 3, 3, 4)])
test_func = tvm.testing.parameter(depthwise_conv2d_NCHWc_KCRSk_acc32)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_autodiff.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te, topi
from tvm.testing import assert_allclose
from tvm.topi.utils import get_const_tuple
def check_grad(
out, inputs, args=[], data_range=(-10, 10), desired_grads=None, assert_no_jacobian=True
):
inputs = inputs if isinstance(inputs, list) else [inputs]
def check_device(device, host="llvm"):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(host):
return
sout = te.create_schedule(out.op)
mout = tvm.build(sout, [out] + inputs + args)
out_shape = get_const_tuple(out.shape)
l, h = data_range
input_data = [
tvm.nd.array(
np.random.uniform(l, h, size=get_const_tuple(input.shape)).astype(input.dtype)
)
for input in inputs
]
arg_vals = [
tvm.nd.array(np.random.uniform(l, h, size=get_const_tuple(arg.shape)).astype(arg.dtype))
for arg in args
]
ones = topi.full_like(out, 1.0)
# we provide head to sum and reduce the output dimension,
        # which is equivalent to grad(out.sum(), inputs)
grads = te.gradient(out, inputs, head=ones)
grad_sched = te.create_schedule([grad.op for grad in grads])
mgrad = tvm.build(grad_sched, list(grads) + inputs + args)
if assert_no_jacobian:
# TODO(yzhliu): it is better to visit the expression and do assertion
lowered_ir = str(tvm.lower(grad_sched, list(grads) + inputs + args, simple_mode=True))
assert "jacobian" not in lowered_ir, lowered_ir
grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)]
mgrad(*grad_data, *input_data, *arg_vals)
g_res = [g.numpy() for g in grad_data]
if desired_grads:
assert isinstance(desired_grads, list)
for actual, desired in zip(g_res, desired_grads):
assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
else:
def forward(*in_data):
out_data = tvm.nd.empty(out_shape, out.dtype)
mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
return out_data.numpy().sum()
tvm.testing.check_numerical_grads(
forward, [d.numpy() for d in input_data + arg_vals], g_res
)
check_device("cpu")
def test_basic_operation():
np.random.seed(0)
shape = (10, 10)
x = te.var("x", dtype="float32")
k = te.reduce_axis((0, 10), name="k")
l = te.reduce_axis((0, 10), name="l")
A0 = te.placeholder(shape, name="A0")
A1 = te.placeholder(shape, name="A1")
zeros = np.zeros(shape)
B = te.compute(shape, lambda i, j: A0[i, j], name="B")
check_grad(B, [A0])
B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name="B")
check_grad(B, [A0, A1])
B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0, data_range=(0.1, 10))
B = te.compute(shape, lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]), name="B")
check_grad(B, A0, data_range=(-4, 4))
B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B")
check_grad(B, A0)
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name="B")
check_grad(B, [A0, A1])
B = te.compute(
shape, lambda i, j: te.sum(A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k), name="B"
)
check_grad(B, A0)
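    # A custom commutative reducer: fcombine merges two partial values and
    # fidentity supplies the identity element (1 for a product).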
def fcombine(x, y):
return x * y
def fidentity(t0):
return tvm.tir.const(1, t0)
prod = te.comm_reducer(fcombine, fidentity, name="prod")
B = te.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name="B")
check_grad(B, A0)
X = te.placeholder((10,), name="X")
A = te.compute((10,), lambda i: X[i] + X[9 - i])
B = te.compute((10,), lambda i: X[i] * X[9 - i])
Y = topi.tensordot(A, B, 1)
check_grad(Y, X)
X = te.placeholder((3, 3), name="X")
Y = topi.einsum("ii->i", (X))
check_grad(Y, X)
def test_topi():
X = te.placeholder((1, 2, 4, 4), name="X")
W = te.placeholder((5, 2, 3, 3), name="W")
W1 = te.placeholder((2, 5, 3, 3), name="W1")
W2 = te.placeholder((1,), name="W2")
R = topi.nn.conv2d(X, W, 1, 1, 1)
check_grad(R, [X, W])
R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1)
check_grad(R1, [X, W, W1])
R = topi.broadcast_to(W2, (5, 2, 3, 3))
check_grad(R, [W2])
R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1)
check_grad(R, [X, W2])
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "avg")
check_grad(R, X)
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(R, X)
X = te.placeholder((1, 2, 5, 5), name="X")
R = topi.reshape(X, (1, 32))
check_grad(R, [X])
X = te.placeholder((1, 2, 5, 5), name="X")
W = te.placeholder((2, 2, 3, 3), name="W")
S = topi.reshape(X, (1, 50))
check_grad(S, [X])
R = X + topi.nn.conv2d(X + topi.nn.conv2d(X, W, 1, 1, 1), W, 1, 1, 1)
check_grad(R, [X, W])
S = topi.nn.softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.sigmoid(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.tanh(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.nn.log_softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
check_grad(S, [W], [X])
X = te.placeholder((1, 2, 3, 5), name="X")
Y = te.placeholder((1, 2, 7, 5), name="Y")
S = topi.concatenate((X, Y), 2)
check_grad(S, [X, Y])
X = te.placeholder((1, 2, 6, 5), name="X")
(S, R) = topi.split(X, 2, 2)
check_grad(S, [X])
check_grad(R, [X])
R1 = topi.concatenate((S, R), 2)
check_grad(R1, [X])
R2 = topi.concatenate((R, S), 2)
check_grad(R2, [X])
X = te.placeholder((4, 5), name="X")
I = te.placeholder((100,), name="I", dtype="int32")
R = topi.take(X, topi.abs(I))
check_grad(R, [X], [I])
W = te.placeholder((5, 5), name="W")
exps = topi.exp(topi.nn.dense(X, W))
sumexps = topi.sum(exps, axis=-1, keepdims=True)
R = exps / sumexps
check_grad(R, [X, W], data_range=(-1, 1))
def test_stride_dilation():
X = te.placeholder((1, 2, 10, 10), name="X")
W = te.placeholder((2, 2, 1, 1), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 2, 2), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 3, 3), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
@pytest.mark.xfail
def test_reduction_init():
np.random.seed(0)
shape = (10, 10)
k = te.reduce_axis((0, 10), name="k")
A0 = te.placeholder(shape, name="A0")
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k, init=0.0), name="B")
check_grad(B, A0)
if __name__ == "__main__":
test_basic_operation()
test_topi()
test_stride_dilation()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_build_lower.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_lower_rfactor():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
s = te.create_schedule(B.op)
ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
BF = s.rfactor(B, ki)
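    # rfactor materializes the ki-partial sums into a new stage BF, leaving a
    # smaller cross-thread reduction on B that can be bound to threadIdx.x.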
xo, xi = s[B].split(s[B].op.axis[0], factor=32)
s[B.op].bind(xo, te.thread_axis("blockIdx.x"))
s[B.op].bind(xi, te.thread_axis("threadIdx.y"))
s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x"))
s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
fapi = tvm.lower(s, [A, B])
def test_dependent_output_shape():
n, m, x = te.size_var("n"), te.size_var("m"), te.size_var("x")
A = te.placeholder((n, m))
B = te.compute((m, n // x), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
mod = tvm.build(s, [A, B, x])
def test_split_uneven_unique_likely():
a = te.placeholder(
(16, 16),
)
b = te.placeholder(
(16, 16),
)
c = te.compute((16, 16), lambda x, y: a[x, y] + b[x, y])
x, y = c.op.axis
sch = te.create_schedule(c.op)
xo, xi = sch[c].split(x, 5)
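    # Splitting an extent of 16 by factor 5 is uneven, so the lowered body must
    # guard the tail iteration; the assertion below checks for that IfThenElse.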
stmt = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.IfThenElse)
if __name__ == "__main__":
test_lower_rfactor()
test_dependent_output_shape()
test_split_uneven_unique_likely()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_create_primfunc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import numpy as np
import tvm
import tvm.testing
from tvm import te, tir, topi
from tvm.script import tir as T
def test_unique_name_complete_block():
A = te.placeholder((16, 16), name="A")
B = te.compute((16, 16), lambda x, y: A[x, y] * 2, name="main")
C = te.compute((16, 16), lambda x, y: B[x, y] + 1, name="main")
func = te.create_prim_func([A, C])
s = tir.Schedule(func, debug_mask="all")
assert isinstance(s.get_sref(s.get_block("main")), tir.schedule.StmtSRef)
assert isinstance(s.get_sref(s.get_block("main_1")), tir.schedule.StmtSRef)
def test_unique_name_reduction_block():
k1 = te.reduce_axis((0, 16), "k1")
k2 = te.reduce_axis((0, 16), "k2")
A = te.placeholder((16, 16), name="A")
B = te.compute((16,), lambda i: te.sum(A[i, k1], axis=k1), name="sum")
C = te.compute((), lambda: te.sum(B[k2], axis=k2), name="sum")
func = te.create_prim_func([A, C])
s = tir.Schedule(func, debug_mask="all")
assert isinstance(s.get_sref(s.get_block("sum")), tir.schedule.StmtSRef)
assert isinstance(s.get_sref(s.get_block("sum_1")), tir.schedule.StmtSRef)
def _check_workload(te_workload, tir_workload):
func = te.create_prim_func(te_workload())
tvm.ir.assert_structural_equal(func, tir_workload)
    # make sure that we can create a schedule from the func
s = tir.Schedule(func, debug_mask="all")
assert s
def te_matmul():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
return [A, B, C]
@T.prim_func
def tir_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0, k0 in T.grid(128, 128, 128):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
C[i, j] = 0.0
C[i, j] += A[i, k] * B[j, k]
def test_matmul():
_check_workload(te_matmul, tir_matmul)
def te_element_wise():
A = te.placeholder((128, 128), name="A")
B = te.compute((128, 128), lambda x, y: A[x, y] * 2, name="B")
C = te.compute((128, 128), lambda x, y: B[x, y] + 1, name="C")
return [A, C]
@T.prim_func
def tir_element_wise(a: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [i0, j0])
B[i, j] = A[i, j] * 2.0
for i0, j0 in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [i0, j0])
C[i, j] = B[i, j] + 1.0
def test_element_wise():
_check_workload(te_element_wise, tir_element_wise)
def te_conv2d():
batch = 16
in_channel = 16
out_channel = 32
size = 14
kernel = 3
A = te.placeholder((batch, in_channel, size, size), name="A")
W = te.placeholder((in_channel, kernel, kernel, out_channel), name="W")
Apad = te.compute(
(batch, in_channel, size + 2, size + 2),
lambda nn, cc, yy, xx: tvm.tir.if_then_else(
tvm.tir.all(yy >= 1, yy - 1 < size, xx >= 1, xx - 1 < size),
A[nn, cc, yy - 1, xx - 1],
0.0,
),
name="Apad",
)
rc = te.reduce_axis((0, in_channel), name="rc")
ry = te.reduce_axis((0, kernel), name="ry")
rx = te.reduce_axis((0, kernel), name="rx")
B = te.compute(
(batch, out_channel, size, size),
lambda nn, ff, yy, xx: te.sum(
Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff], axis=[rc, ry, rx]
),
name="B",
)
return [A, W, B]
@T.prim_func
def tir_conv2d(a: T.handle, w: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, [16, 16, 14, 14])
W = T.match_buffer(w, [16, 3, 3, 32])
B = T.match_buffer(b, [16, 32, 14, 14])
Apad = T.alloc_buffer([16, 16, 16, 16])
for n, c, y, x in T.grid(16, 16, 16, 16):
with T.block("Apad"):
nn, cc, yy, xx = T.axis.remap("SSSS", [n, c, y, x])
Apad[nn, cc, yy, xx] = T.if_then_else(
1 <= yy and yy < 15 and 1 <= xx and xx < 15,
A[nn, cc, yy - 1, xx - 1],
0.0,
dtype="float32",
)
for n, f, y, x, kc, ky, kx in T.grid(16, 32, 14, 14, 16, 3, 3):
with T.block("B"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [n, f, y, x, kc, ky, kx])
with T.init():
B[nn, ff, yy, xx] = 0.0
B[nn, ff, yy, xx] += Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff]
def test_conv2d():
_check_workload(te_conv2d, tir_conv2d)
def te_multi_output():
n = te.var("n")
m = te.var("m")
A0 = te.placeholder((m, n), name="A0")
A1 = te.placeholder((m, n), name="A1")
B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name="B")
return [A0, A1, B0, B1]
@T.prim_func
def tir_multi_output(a0: T.handle, a1: T.handle, b0: T.handle, b1: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
A0 = T.match_buffer(a0, (m, n))
A1 = T.match_buffer(a1, (m, n))
B0 = T.match_buffer(b0, (m, n))
B1 = T.match_buffer(b1, (m, n))
for i0, i1 in T.grid(m, n):
with T.block("B.v0"):
i, j = T.axis.remap("SS", [i0, i1])
B0[i, j] = A0[i, j] + 2.0
with T.block("B.v1"):
i, j = T.axis.remap("SS", [i0, i1])
B1[i, j] = A1[i, j] * 3.0
def test_multi_output():
_check_workload(te_multi_output, tir_multi_output)
def te_extern():
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.extern(
(128, 128),
[A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], 0, 0
),
name="C",
)
return [A, B, C]
@T.prim_func
def tir_extern(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
off1 = te.var("elem_offset")
off2 = te.var("elem_offset_1")
off3 = te.var("elem_offset_2")
A = T.match_buffer(a, (128, 128), elem_offset=off1)
B = T.match_buffer(b, (128, 128), elem_offset=off2)
C = T.match_buffer(c, (128, 128), elem_offset=off3)
# body
with T.block("C"):
T.reads([A[0:128, 0:128], B[0:128, 0:128]])
T.writes([C[0:128, 0:128]])
T.evaluate(
T.tvm_call_packed(
"tvm.contrib.cblas.matmul",
T.tvm_stack_make_array(
A.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off1,
dtype="handle",
),
T.tvm_stack_make_array(
B.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off2,
dtype="handle",
),
T.tvm_stack_make_array(
C.data,
T.tvm_stack_make_shape(128, 128, dtype="handle"),
0,
2,
0.0,
off3,
dtype="handle",
),
0,
0,
dtype="int32",
)
)
def test_extern():
_check_workload(te_extern, tir_extern)
def te_reordered_matmul():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
return [C, A, B]
@T.prim_func
def tir_reordered_matmul(c: T.handle, a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0, k0 in T.grid(128, 128, 128):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
C[i, j] = 0.0
C[i, j] += A[i, k] * B[j, k]
def test_arg_order():
_check_workload(te_reordered_matmul, tir_reordered_matmul)
def te_scan():
m = te.var("m")
n = te.var("n")
X = te.placeholder((m, n), name="X")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: X[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i])
s_scan = tvm.te.scan(s_init, s_update, s_state, inputs=[X])
return [X, s_scan]
def test_error_reporting():
try:
te.create_prim_func(te_scan())
assert False
except TypeError as e:
error_message = str(e)
assert error_message.find("Unsupported Operation: ScanOp.") != -1
return
assert False
def test_constant():
M = 11
A = te.placeholder((M,), name="A")
B = te.compute(tuple(), lambda: 2, name="B")
# Manually craft ProducerLoad because `B[]` is not allowed.
C = te.compute(
(M,), lambda x: A[x] + tvm.tir.expr.ProducerLoad(B, []), name="C", tag="broadcast"
)
func = te.create_prim_func([C, A])
func = tvm.build(func)
a_np = np.random.uniform(size=(M,)).astype(A.dtype)
c = tvm.nd.array(np.zeros(M, dtype=C.dtype))
    func(c, tvm.nd.array(a_np))
tvm.testing.assert_allclose(a_np + 2, c.numpy())
def test_data_dependent_access():
A = te.placeholder((10,), name="A")
B = te.placeholder((10,), name="B", dtype="int32")
C = te.compute((10,), lambda i: A[B[i]])
func = te.create_prim_func([C, A, B])
func = tvm.build(func)
a_np = np.random.uniform(size=(10,)).astype(A.dtype)
b_np = np.arange(10, dtype=B.dtype)
c = tvm.nd.array(np.zeros(10, dtype=C.dtype))
func(c, tvm.nd.array(a_np), tvm.nd.array(b_np))
tvm.testing.assert_allclose(a_np[b_np], c.numpy())
def test_select_simplify():
placeholder = te.placeholder([1, 128, 10, 10, 4], dtype="float32")
tensor = topi.nn.adaptive_pool(placeholder, [1, 1], "avg", "NCHW4c")
result = te.create_prim_func([placeholder, tensor])
script_func = result.script()
# There should be no Select
assert script_func.find("Select") == -1
# There should be no undefined vars
assert script_func.find("Var") == -1
def test_tensor_attr():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute(
(128, 128),
lambda x, y: te.sum(A[x, k] * B[y, k], axis=k),
name="C",
attrs={"layout_free_placeholders": [B]},
)
func = te.create_prim_func([A, B, C])
rt_func = tvm.script.from_source(func.script())
tvm.ir.assert_structural_equal(func, rt_func)
@T.prim_func
def expected_layout_attr(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
D: T.Buffer[(128, 128), "float32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
C = T.alloc_buffer([128, 128], dtype="float32")
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("C"):
x, y, k = T.axis.remap("SSR", [i0, i1, i2])
with T.init():
C[x, y] = T.float32(0)
C[x, y] = C[x, y] + A[x, k] * B[y, k]
for i0, i1 in T.grid(128, 128):
with T.block("D"):
x, y = T.axis.remap("SS", [i0, i1])
D[x, y] = C[x, y] + T.float32(1)
def test_tensor_layout_attr():
k = te.reduce_axis((0, 128), "k")
A = te.placeholder((128, 128), name="A")
B = te.placeholder((128, 128), name="B")
C = te.compute(
(128, 128),
lambda x, y: te.sum(A[x, k] * B[y, k], axis=k),
name="C",
attrs={"layout_free_placeholders": [B]},
)
D = te.compute(
(128, 128),
lambda x, y: C[x, y] + 1,
name="D",
attrs={"layout_free_placeholders": [C]},
)
func = te.create_prim_func([A, B, D])
tvm.ir.assert_structural_equal(func, expected_layout_attr)
def te_argmax_idx_val():
def f_combine(x, y):
lhs = tvm.tir.Select((x[1] >= y[1]), x[0], y[0])
rhs = tvm.tir.Select((x[1] >= y[1]), x[1], y[1])
return lhs, rhs
def f_identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
return tvm.tir.const(-1, dtype0), tvm.te.min_value(dtype1)
argmax = te.comm_reducer(f_combine, f_identity, name="argmax")
m = te.var("m")
n = te.var("n")
idx = te.placeholder((m, n), name="idx", dtype="int32")
val = te.placeholder((m, n), name="val", dtype="float32")
k = te.reduce_axis((0, n), "k")
max_idx, max_val = te.compute(
(m,), lambda i: argmax((idx[i, k], val[i, k]), axis=k), name="argmax"
)
return [idx, val, max_idx, max_val]
@T.prim_func
def tir_argmax_idx_val(
var_idx: T.handle, var_val: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
idx = T.match_buffer(var_idx, [m, n], dtype="int32")
val = T.match_buffer(var_val, [m, n], dtype="float32")
argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="int32")
argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="float32")
for i0, i1 in T.grid(m, n):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(val[i, k], idx[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = T.int32(-1)
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
def te_argmax_val_idx():
def f_combine(x, y):
lhs = tvm.tir.Select((x[0] >= y[0]), x[0], y[0])
rhs = tvm.tir.Select((x[0] >= y[0]), x[1], y[1])
return lhs, rhs
def f_identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
return tvm.te.min_value(dtype0), tvm.tir.const(-1, dtype1)
argmax = te.comm_reducer(f_combine, f_identity, name="argmax")
m = te.var("m")
n = te.var("n")
val = te.placeholder((m, n), name="val", dtype="float32")
idx = te.placeholder((m, n), name="idx", dtype="int32")
k = te.reduce_axis((0, n), "k")
max_val, max_idx = te.compute(
(m,), lambda i: argmax((val[i, k], idx[i, k]), axis=k), name="argmax"
)
return [val, idx, max_val, max_idx]
@T.prim_func
def tir_argmax_val_idx(
var_val: T.handle, var_idx: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
m = T.var("int32")
n = T.var("int32")
val = T.match_buffer(var_val, [m, n], dtype="float32")
idx = T.match_buffer(var_idx, [m, n], dtype="int32")
argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="float32")
argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="int32")
for i0, i1 in T.grid(m, n):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(val[i, k], idx[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = T.min_value("float32")
argmax_v1[i] = T.int32(-1)
v_argmax_v0: T.float32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v0[i], val[i, k])
v_argmax_v1: T.int32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v1[i], idx[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
def test_argmax_idx_val():
_check_workload(te_argmax_idx_val, tir_argmax_idx_val)
def test_argmax_val_idx():
_check_workload(te_argmax_val_idx, tir_argmax_val_idx)
def test_int64_indices():
n = te.var("n", "int64")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
prim_func = te.create_prim_func([A, B])
loop = prim_func.body.block.body
assert loop.loop_var.dtype == "int64"
assert loop.min.dtype == "int64"
assert loop.extent.dtype == "int64"
def test_zero_dim_add():
def te_func():
a = te.placeholder((), name="a", dtype="int32")
b = te.placeholder((), name="b", dtype="int32")
c = te.compute(a.shape, lambda *i: a(*i) + b(*i), name="c")
return [a, b, c]
@T.prim_func
def expected(
a: T.Buffer[(), "int32"],
b: T.Buffer[(), "int32"],
c: T.Buffer[(), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
with T.block("c"):
vi = T.axis.spatial(1, 0)
T.reads(a[()], b[()])
T.writes(c[()])
c[()] = a[()] + b[()]
_check_workload(te_func, expected)
if __name__ == "__main__":
test_unique_name_complete_block()
test_unique_name_reduction_block()
test_matmul()
test_element_wise()
test_conv2d()
test_multi_output()
test_extern()
test_arg_order()
test_error_reporting()
test_constant()
test_select_simplify()
test_tensor_attr()
test_tensor_layout_attr()
test_argmax_idx_val()
test_argmax_val_idx()
test_int64_indices()
test_zero_dim_add()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_group.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test group effect"""
import tvm
from tvm import te
def test_scan_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i])
s_update1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i])
s_update2 = te.compute((m, n), lambda t, i: s_update1[t, i] + 1)
s_update3 = te.compute((m, n), lambda t, i: s_update2[t, i] + 1)
res = tvm.te.scan(s_init, s_update3, s_state, inputs=x)
s = te.create_schedule(res.op)
assert s[s_update1].group is not None
assert s[s_update2].group == s[s_update1].group
    # compute_at within the group is valid
s[s_update1].compute_at(s[s_update2], s_update2.op.axis[1])
    # create a new group for s_update1 and s_update2
g2 = s.create_group(outputs=s_update2, inputs=[s_state, x])
assert g2.group is not None
assert g2.group == s[s_update3].group
assert s[s_update2].group == g2
assert s[s_update1].group == g2
g2.compute_at(s[s_update3], s_update3.op.axis[1])
assert g2.attach_stage == s[s_update3]
try:
        # compute_at outside the group is an error.
s[s_update2].compute_at(s[s_init], s_init.op.axis[0])
assert False
except tvm.error.TVMError:
pass
def test_compute_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
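    # include_inputs=True pulls the input stage x into the group along with x1.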
g = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert s[x1].group == g
assert s[x].group == g
g.compute_at(s[x2], x2.op.axis[1])
assert g.attach_stage == s[x2]
assert g.num_child_stages == 2
def test_nest_group():
m = te.size_var("m")
n = te.size_var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g1 = s.create_group(outputs=x1, inputs=x)
g2 = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert set(s.groups) == set([g1, g2])
assert s[x].group == g2
assert s[x1].group == g1
assert g1.group == g2
assert g2.num_child_stages == 2
assert g1.num_child_stages == 1
if __name__ == "__main__":
test_nest_group()
test_compute_group()
test_scan_group()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_hybrid_script.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm, inspect, sys, traceback, numpy, pytest, types, os
from tvm import te
from tvm.contrib import utils
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS
import tvm.testing
@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target="llvm", sch=None, outs=None):
def tvm_val_2_py_val(val):
val = tvm.tir.stmt_functor.substitute(val, var_dict)
val = tvm.arith.Analyzer().simplify(val)
assert isinstance(val, (tvm.tir.IntImm,))
return val.value
dev = tvm.device(target, 0)
op = None
if sch is None:
outs = func(*tuple(tvm.runtime.convert(i) if isinstance(i, list) else i for i in args))
op = outs[0].op if isinstance(outs, list) else outs.op
sch = te.create_schedule(op)
else:
assert outs is not None
assert isinstance(outs, list)
op = outs[0].op
emu_args = []
nd_args = []
for i in args:
if isinstance(i, te.tensor.Tensor):
shape = [tvm_val_2_py_val(j) for j in i.shape]
emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
nd_args.append(tvm.nd.array(emu_args[-1], dev))
elif isinstance(i, tvm.tir.Var):
emu_args.append(tvm_val_2_py_val(i))
nd_args.append(emu_args[-1])
else:
assert isinstance(i, list)
emu_args.append(numpy.array(i))
compile_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] + (
outs if isinstance(outs, list) else [outs]
)
module = tvm.build(sch, compile_args, target=target)
assert module
out_tensors = []
for i in range(op.num_outputs):
output = op.output(i)
shape = [tvm_val_2_py_val(j) for j in output.shape]
nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), dev))
out_tensors.append(nd_args[-1])
ref_data = func(*emu_args)
if isinstance(ref_data, numpy.ndarray):
ref_data = [ref_data]
module(*nd_args)
for nd, np in zip(out_tensors, ref_data):
tvm.testing.assert_allclose(nd.numpy(), np, rtol=1e-5, atol=1e-5)
module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
h_module = te.hybrid.build(sch, module_args, module_outs)
return h_module, module_args, module_outs
@script
def outer_product(n, m, a, b):
"""This is a simple outer product.
Actually this function is not required to be documented.
    I wrote this docstring to test the docstring-skipping functionality.
"""
c = output_tensor((n, m), a.dtype)
for i in range(n):
for j in range(m):
assert i < n and j < m, "index out of range!"
c[i, j] = a[i] * b[j]
return c
@tvm.testing.skip_if_wheel_test
# Test global function
# Test bridge between frontend and backend
def test_outer_product():
n = te.size_var("n")
m = te.size_var("m")
a = te.placeholder((n,), name="a")
b = te.placeholder((m,), name="b")
try:
c = outer_product(n, m, a, b)
ir = c.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == "could not get source code"
return
# Check for i in (0, n)
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i"
assert ir.min.value == 0
assert ir.extent.name == "n"
ibody = ir.body
assert isinstance(ibody, tvm.tir.For)
# Check for j in (0, m)
assert ibody.loop_var.name == "j"
assert ibody.min.value == 0
assert ibody.extent.name == "m"
# Check loop body
jblock = ibody.body
assert isinstance(jblock, tvm.tir.SeqStmt)
jbody = jblock[0]
assert isinstance(jbody, tvm.tir.AssertStmt)
assert isinstance(jbody.message, tvm.tir.StringImm)
assert jbody.message.value == "index out of range!"
jbody = jblock[1]
assert isinstance(jbody, tvm.tir.ProducerStore)
assert jbody.producer.op.name == "c"
assert len(jbody.indices) == 2
assert jbody.indices[0].name == "i"
assert jbody.indices[1].name == "j"
assert isinstance(jbody.value, tvm.tir.Mul)
mul = jbody.value
assert isinstance(mul.a, tvm.tir.ProducerLoad)
assert mul.a.producer.name == "a"
assert mul.b.producer.name == "b"
func, ins, outs = run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
temp = utils.tempdir()
path = temp.relpath("%s.py" % func.name)
func.save(path)
func_ = te.hybrid.HybridModule()
func_.load(path)
run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
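    # Hybrid-script intrinsics (e.g. output_tensor) must not leak into the
    # surrounding namespaces once tracing is done.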
for key, _ in HYBRID_GLOBALS.items():
assert key not in globals().keys()
assert key not in outer_product.__globals__.keys()
@tvm.testing.skip_if_wheel_test
# Test local function
# Test allocation of local variable
def test_fanout():
@script
def fanout(n, a):
three = 3.0
b = output_tensor((a.shape[0] - 3,), a.dtype)
for i in range(a.shape[0] - 3):
sigma = 0.0
for j in range(3):
sigma += a[i + j]
sigma = sigma / three
b[i] = sigma
return b
n = te.size_var("n")
a = te.placeholder((n,), "float32", name="a")
try:
b = fanout(n, a)
ir = b.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == "could not get source code"
return
# Check for i in (0, n-3)
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i"
assert ir.min.value == 0
assert tvm.ir.structural_equal(ir.extent, n - 3)
    # Check loop body
abody = ir.body
assert isinstance(abody, tvm.tir.ProducerRealize)
assert abody.bounds[0].min.value == 0
assert abody.bounds[0].extent.value == 1
assert abody.producer.op.name == "sigma"
# Check i loop body
rbody = abody.body
assert isinstance(rbody[0], tvm.tir.ProducerStore)
assert rbody[0].producer.op.name == "sigma"
assert len(rbody[0].indices) == 1
assert rbody[0].indices[0].value == 0
# Check fanout loop
jloop = rbody[1]
assert jloop.loop_var.name == "j"
assert jloop.min.value == 0
assert jloop.extent.value == 3
jbody = jloop.body
assert isinstance(jbody, tvm.tir.ProducerStore)
assert len(jbody.indices) == 1
assert jbody.indices[0].value == 0
assert jbody.producer.op.name == "sigma"
assert isinstance(jbody.value, tvm.tir.Add)
value = jbody.value
assert isinstance(value.a, tvm.tir.ProducerLoad)
assert value.a.producer.name == "sigma"
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert value.b.producer.name == "a"
assert len(value.b.indices) == 1
assert tvm.ir.structural_equal(value.b.indices[0], ir.loop_var + jloop.loop_var)
divide = rbody[2]
assert isinstance(divide, tvm.tir.ProducerStore)
assert len(divide.indices) == 1
assert divide.indices[0].value == 0
value = divide.value
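    # Division by the captured constant is canonicalized into multiplication by its reciprocal (1/3).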
assert isinstance(value, tvm.tir.Mul)
assert value.a.producer.name == "sigma"
assert len(value.a.indices) == 1
assert value.a.indices[0].value == 0
assert abs(value.b.value - (1 / 3.0)) < 1e-5
write = rbody[3]
assert isinstance(write, tvm.tir.ProducerStore)
assert write.producer.op.name == "b"
assert write.value.producer.name == "sigma"
assert len(write.value.indices) == 1
assert write.value.indices[0].value == 0
func, ins, outs = run_and_check(fanout, [n, a], {n: 10})
run_and_check(func, ins, {n: 10}, outs=outs)
def test_looptype():
@script
def looptype(a, b, c):
d = output_tensor((16,), "int32")
e = output_tensor((16,), "int32")
f = output_tensor((16,), "int32")
for i in parallel(16):
d[i] = a[i]
for j in vectorize(16):
e[j] = b[j]
for k in unroll(16):
f[k] = c[k]
return d, e, f
a = te.placeholder((16,), name="a", dtype="int32")
b = te.placeholder((16,), name="b", dtype="int32")
c = te.placeholder((16,), name="c", dtype="int32")
try:
d, e, f = looptype(a, b, c)
ir = d.op.body
    except IOError:
        return
iloop = ir[0]
jloop = ir[1]
kloop = ir[2]
assert iloop.kind == tvm.tir.ForKind.PARALLEL
assert jloop.kind == tvm.tir.ForKind.VECTORIZED
assert kloop.kind == tvm.tir.ForKind.UNROLLED
func, ins, outs = run_and_check(looptype, [a, b, c])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_if():
@script
def if_then_else(a):
b = output_tensor((10,), "int32")
c = output_tensor((10,), "int32")
for i in range(10):
if i % 2 == 0:
c[i] = a[i]
else:
c[i] = b[i]
for i in unroll(10):
b[i] = -1 if i % 2 == 0 else 1
return b, c
a = te.placeholder((10,), dtype="int32", name="a")
func, ins, outs = run_and_check(if_then_else, [a])
run_and_check(func, ins, outs=outs)
@script
def if_triple_condition(a):
b = output_tensor((10,), "int32")
for i in range(10):
if 0 <= i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_triple_condition, [a])
run_and_check(func, ins, outs=outs)
@script
def if_and(a):
b = output_tensor((10,), "int32")
for i in range(10):
if i >= 0 and i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
func, ins, outs = run_and_check(if_and, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_bind():
@script
def vec_add(a, b):
c = output_tensor((1000,), "float32")
for tx in bind("threadIdx.x", 1000):
c[tx] = a[tx] + b[tx]
return c
a = te.placeholder((1000,), dtype="float32", name="a")
b = te.placeholder((1000,), dtype="float32", name="b")
func, ins, outs = run_and_check(vec_add, [a, b], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@script
def raw(a, b):
c = output_tensor((1000,), "float32")
for i in range(1000):
c[i] = a[i] + b[i]
return c
c = raw(a, b)
sch = te.create_schedule(c.op)
x = te.thread_axis("threadIdx.x")
sch[c].bind(c.op.axis[0], x)
func, ins, outs = run_and_check(raw, [a, b], sch=sch, outs=[c], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@te.hybrid.script
def foo(a):
c = output_tensor((a.shape[0],), a.dtype)
total = allocate((1,), a.dtype, "local")
len_i = a.shape[0]
len_j = a.shape[1]
for i in bind("threadIdx.x", len_i):
total[0] = 0.0
for k in const_range(len_j):
total[0] += a[i, k]
c[i] = total[0]
return c
a = te.placeholder((8, 4), "float32")
c = foo(a)
s = te.create_schedule(c.op)
ir = tvm.lower(s, [a, c])
func, ins, outs = run_and_check(foo, [a], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@te.hybrid.script
def max_threads(a):
b = output_tensor(a.shape, a.dtype)
n = a.shape[0]
m = max_num_threads(True)
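        # m is the maximum number of threads per block reported for the current CUDA target.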
for i in bind("threadIdx.x", m):
for j in bind("blockIdx.x", ceil_div(n, m)):
if i * m + j < n:
b[i * m + j] = a[i * m + j] + a[i * m + j]
return b
a = te.placeholder((10000,), "float32")
with tvm.target.Target("cuda"):
func, ins, outs = run_and_check(max_threads, [a], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_math_intrin():
@script
def intrin_real(a):
b = output_tensor((8,), "float32")
b[0] = sqrt(a[0])
b[1] = log(a[1])
b[2] = exp(a[2])
b[3] = sigmoid(a[3])
b[4] = power(a[4], a[5])
b[5] = tanh(a[5])
b[6] = min(a[4], a[5])
b[7] = max(a[5], a[6])
return b
a8 = te.placeholder((8,), dtype="float32", name="a")
b8 = intrin_real(a8)
sch = te.create_schedule(b8.op)
func = tvm.build(sch, [a8, b8])
assert func
a = numpy.arange(2, 10).astype("float32")
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.zeros((8,), dtype="float32"))
b = intrin_real(a)
func(tvm_a, tvm_b)
tvm.testing.assert_allclose(b, tvm_b.numpy(), rtol=1e-5)
@script
def intrin_int(a):
b = output_tensor((1,), "int32")
b[0] = popcount(a[0])
return b
a1 = te.placeholder((1,), dtype="int32")
b1 = intrin_int(a1)
sch = te.create_schedule(b1.op)
func = tvm.build(sch, [a1, b1])
assert func
a = numpy.array([114514]).astype("int32")
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(numpy.array([0]).astype("int32"))
b = intrin_int(a)
func(tvm_a, tvm_b)
assert tvm_b.numpy()[0] == b[0]
@tvm.testing.skip_if_wheel_test
# test non-canonical loops
def test_non_zero():
@te.hybrid.script
def blur(a):
b = output_tensor((30, 30), "float32")
for i in range(2, 32):
for j in range(2, 32):
s = 0.0
for di in range(3):
for dj in range(3):
s += a[i - di, j - dj]
b[i - 2, j - 2] = s / 9.0
return b
a = te.placeholder((32, 32), "float32", "a")
func, ins, outs = run_and_check(blur, [a])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype="float32")
for i in range(10):
for j in range(i, 10):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((10,), dtype="float32", name="a")
b = te.placeholder((10,), dtype="float32", name="b")
func, ins, outs = run_and_check(triangle, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_allocate():
@te.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), "float32")
for i in range(30):
ha = allocate((3, 30), "float32")
for j in range(3):
for k in range(30):
ha[j, k] = a[i + j, k] + a[i + j, k + 1] + a[i + j, k + 2]
for j in range(30):
b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
return b
a = te.placeholder((32, 32), "float32", "a")
b = blur2d(a)
sch = te.create_schedule(b.op)
func, ins, outs = run_and_check(blur2d, [a])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256,), "float32")
shared = allocate((256,), "float32", "shared")
for i in bind("threadIdx.x", 256):
shared[i] = a[i]
local = allocate((256,), "float32", "local")
for i in bind("threadIdx.x", 256):
local[i] = b[i]
for i in bind("threadIdx.x", 256):
c[i] = shared[i] + local[i]
return c
a = te.placeholder((256,), dtype="float32", name="a")
b = te.placeholder((256,), dtype="float32", name="b")
c = share_vec_add(a, b)
func, ins, outs = run_and_check(share_vec_add, [a, b], target="cuda")
run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_upstream():
@te.hybrid.script
def upstream(a):
b = output_tensor((20,), "float32")
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20,), "float32")
b = te.placeholder((20,), "float32")
c = te.compute((20,), lambda x: a[x] + b[x])
d = upstream(c)
sch = te.create_schedule([c.op, d.op])
ir = tvm.lower(sch, [a, b, d])
func = tvm.build(sch, [a, b, d])
assert func
a = numpy.random.randn(20).astype("float32")
b = numpy.random.randn(20).astype("float32")
ref = numpy.zeros((20,), "float32")
for i in range(20):
ref[i] = (a[i] + b[i]) * i
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(b)
tvm_d = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
func(tvm_a, tvm_b, tvm_d)
tvm.testing.assert_allclose(tvm_d.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_downstream():
@te.hybrid.script
def downstream(a):
b = output_tensor((20,), "float32")
for i in range(20):
b[i] = a[i] * i
return b
a = te.placeholder((20,), "float32")
b = downstream(a)
c = te.compute((20,), lambda x: b[x] + 1.0)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c])
assert module
a = numpy.random.randn(20).astype("float32")
ref = numpy.zeros((20,)).astype("float32")
for i in range(20):
ref[i] = (a[i] * i) + 1.0
tvm_a = tvm.nd.array(a)
tvm_c = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
module(tvm_a, tvm_c)
tvm.testing.assert_allclose(tvm_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_const_param():
@te.hybrid.script
def add_something(a, b):
c = output_tensor((11,), "int32")
for i in range(11):
c[i] = a[i] + b
return c
a = te.placeholder((11,), dtype="int32", name="a")
b = tvm.tir.const(11, "int32")
c = add_something(a, b)
sch = te.create_schedule(c.op)
module = tvm.build(sch, [a, c], "llvm")
assert module
np_a = numpy.arange(11).astype("int32")
np_b = 11
np_c = numpy.zeros((11,)).astype("int32")
nd_a = tvm.nd.array(np_a)
nd_c = tvm.nd.array(numpy.zeros((11,)).astype("int32"))
module(nd_a, nd_c)
ref = add_something(np_a, 11)
tvm.testing.assert_allclose(nd_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_value_index():
@te.hybrid.script
def kernel_a(a):
b = output_tensor((16,), "int32")
c = output_tensor((4, 4), "int32")
for i in range(16):
b[i] = a[i] + 2
c[i // 4, i % 4] = a[i] + 1
return b, c
@te.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), "int32")
for i in range(4):
for j in range(4):
c[i, j] = a[i * 4 + j] * b[i, j]
return c
a = te.placeholder((16,), "int32")
b, c = kernel_a(a)
d = kernel_b(c, b)
sch = te.create_schedule(d.op)
module = tvm.build(sch, [a, d])
assert module
np_a = numpy.arange(16).astype("int32")
np_b, np_c = kernel_a(np_a)
ref = kernel_b(np_c, np_b)
res = tvm.nd.array(numpy.zeros((4, 4)).astype("int32"))
module(tvm.nd.array(np_a), res)
tvm.testing.assert_allclose(res.numpy(), ref)
@tvm.testing.skip_if_wheel_test
def test_func_call():
@te.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
for i in range(len(a)):
b[i] = i + 1.0
c = outer_product(10, 10, a, b)
d = output_tensor(c.shape, c.dtype)
for i in range(10):
for j in range(10):
d[i, j] = c[i, j] + i * j
return d
a = te.placeholder((10,), name="a")
b = te.placeholder((10,), name="b")
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_bool():
@te.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
for i in range(1, a.shape[0] - 1):
if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
b[i] = a[i]
else:
b[i] = 0.0
return b
a = te.placeholder((10,), name="a")
func, ins, outs = run_and_check(foo, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_const_range():
@te.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, "int32")
for i in const_range(2):
for j in const_range(5):
c[i, j] = float32(int32(a[i, j]) + b[i, j])
for i in const_range(len(b)):
for j in const_range(len(b[0])):
d[i, j] = int32(a[i, j] + b[i, j])
return c, d
a = te.placeholder((2, 5), name="a", dtype="float32")
b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def goo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in const_range(len_b * 2):
if i < len_b:
c[i] = a[i] + b[i]
else:
c[i - len_b] = a[i - len_b] + b[i - len_b]
return c
a = te.placeholder((5,), name="a", dtype="int32")
b = [1, 2, 3, 4, 5]
c = goo(a, tvm.runtime.convert(b))
sch = te.create_schedule(c.op)
func, ins, outs = run_and_check(goo, [a, b])
run_and_check(func, ins, outs=outs)
@te.hybrid.script
def hoo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
for i in range(a.shape[0]):
for j in const_range(len(b)):
d = a[i] * b[j]
d += a[i] + b[j]
c[i] = d
return c
a = te.placeholder((5,), name="a", dtype="int32")
b = [1, 2, 3, 4, 5]
func, ins, outs = run_and_check(hoo, [a, b])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_schedule():
@script
def outer_product(a, b):
c = output_tensor((64, 64), a.dtype)
for i in range(64):
for j in range(64):
c[i, j] = a[i] * b[j]
return c
a = te.placeholder((64,), name="a", dtype="float32")
b = te.placeholder((64,), name="b", dtype="float32")
c = outer_product(a, b)
# Test perfect loop split
# Test loop reorder
# Test loop annotation
sch = te.create_schedule(c.op)
i, j = c.op.axis
io, ii = sch[c].split(i, 4)
sch[c].parallel(ii)
jo, ji = sch[c].split(j, 4)
joo, joi = sch[c].split(jo, 4)
sch[c].vectorize(ji)
sch[c].reorder(ii, io, joo, joi, ji)
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.inner"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.outer"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "j.outer.outer"
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "j.outer.inner"
ir = ir.body
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test fuse
sch = te.create_schedule(c.op)
sch[c].fuse(c.op.axis[0], c.op.axis[1])
ir = tvm.lower(sch, [a, b, c])["main"].body
assert isinstance(ir, tvm.tir.AttrStmt)
ir = ir.body
assert isinstance(ir, tvm.tir.For)
assert ir.loop_var.name == "i.j.fused"
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test imperfect loop split
sch = te.create_schedule(c.op)
sch[c].split(c.op.axis[0], 3)
ir = tvm.lower(sch, [a, b, c], simple_mode=True)
func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
run_and_check(func, ins, outs=outs)
# Test loop binds
@tvm.testing.skip_if_wheel_test
def test_capture():
n = 8
constant_tuple = (10, n)
constant_list = [[1, 2], [3, n]]
const_value = 1
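    # Values captured from the enclosing Python scope are folded into the script as constants at parse time.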
@te.hybrid.script
def add_something(a):
c = output_tensor((constant_tuple[1],), "int32")
for i in range(constant_tuple[1]):
c[i] = a[i] + constant_list[1][const_value]
return c
a = te.placeholder((n,), dtype="int32", name="a")
func, ins, outs = run_and_check(add_something, [a])
run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_array_inputs():
@script
def sum_array(inputs):
out = output_tensor((10,), inputs[0].dtype)
n = len(inputs)
for i in range(10):
for j in const_range(n):
out[i] += inputs[j][i]
return out
n = 5
inputs = []
for i in range(n):
inputs.append(te.placeholder((10,), name="t%s" % i, dtype="float32"))
out = sum_array(tvm.runtime.convert(inputs))
assert len(out.op.inputs) == n
sch = te.create_schedule(out.op)
mod = tvm.build(sch, inputs + [out], target="llvm")
assert mod
input_nd = []
out_ref = numpy.zeros((10,))
for _ in range(n):
arr = numpy.random.uniform(size=(10,)).astype("float32")
input_nd.append(tvm.nd.array(arr))
out_ref += arr
out_nd = tvm.nd.array(numpy.zeros((10,), "float32"))
mod(*input_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_ref)
if __name__ == "__main__":
test_outer_product()
test_fanout()
test_looptype()
test_if()
test_bind()
test_math_intrin()
test_non_zero()
test_allocate()
test_upstream()
test_downstream()
test_const_param()
test_value_index()
test_func_call()
test_bool()
test_const_range()
test_schedule()
test_capture()
test_array_inputs()
# TODO:
# test_inplace()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import pickle as pkl
def test_schedule_create():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
AA = te.compute((m, l), lambda i, j: A[i, j])
T = te.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
s = te.create_schedule(T.op)
s[AA].set_scope("shared")
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
s[AA].compute_at(s[T], xi1)
xo, xi = s[AA].split(AA.op.axis[0], factor=10)
s[T].reorder(xi2, xi1)
assert T.op.axis[1] in s[T].leaf_iter_vars
# save load json
json_str = tvm.ir.save_json(s)
s_loaded = tvm.ir.load_json(json_str)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
# pickle unpickle
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
def test_reorder():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute(m, lambda i: A[i + 1])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
order = (xi2, xi1, xo)
assert tuple(s[T].leaf_iter_vars) != order
s[T].reorder(*order)
assert tuple(s[T].leaf_iter_vars) == order
try:
# pass duplicate IterVar
# must raise an error
s[T].reorder(xi2, xi1, xi2)
assert False
except tvm.error.TVMError:
pass
def test_split():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
assert tuple(s[T].leaf_iter_vars) == (xo, xi)
def test_tile():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
assert tuple(s[T].leaf_iter_vars) == (xo, yo, xi, yi)
def test_fuse():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
fused = s[T].fuse(xo, yo)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused, xi, yi)
def test_fuse_with_split():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
fused = s[T].fuse(xi, y)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (xo, fused)
def test_fuse_with_out_of_order_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
with pytest.raises(RuntimeError):
fused = s[T].fuse(xo, y) # should throw here
def test_fuse_with_out_of_order_axis_with_reorder():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
fused = s[T].fuse(y, xo) # should be ok
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
with pytest.raises(RuntimeError):
fused = s[T].fuse(y, xi) # should throw here
def test_singleton():
A = te.placeholder((), name="A")
T = te.compute((), lambda: A() + 1)
s = te.create_schedule(T.op)
fused = s[T].fuse()
assert any(isinstance(x, tvm.te.schedule.Singleton) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused,)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
def test_vectorize():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
s[T].vectorize(yi)
s[T].unroll(xi)
UNROLL = tvm.te.schedule.IterVar.Unrolled
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xi].iter_type == UNROLL
assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
def test_vectorize_commreduce():
V = te.placeholder((128,), name="V")
ax = te.reduce_axis((0, 128), name="ax")
O = te.compute((1,), lambda _: te.sum(V[ax], axis=[ax]))
s = te.create_schedule(O.op)
with pytest.raises(RuntimeError):
s[O].vectorize(ax) # should throw here
def test_pragma():
m = 100
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].pragma(xo, "pragma1")
s[T].pragma(xi, "vectorize")
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xo].pragma_keys[0].value == "pragma1"
assert s[T].iter_var_attrs[xi].iter_type == VECTORIZE
def test_rfactor():
n = te.size_var("n")
k1 = te.reduce_axis((0, n), name="k1")
k2 = te.reduce_axis((0, n), name="k2")
A = te.placeholder((n, n, n), name="A")
B = te.compute((n,), lambda i: te.sum(A[i, k1, k2], axis=[k1, k2]))
# normal schedule
s = te.create_schedule(B.op)
BF = s.rfactor(B, k1)
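    # rfactor lifts the k1 reduction into a new intermediate stage BF of shape (n, n).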
assert tuple(BF.shape) == (n, n)
assert set(BF.op.body[0].axis) == set([k2])
assert s[B].op.body[0].axis[0].dom.extent == n
assert len(s[B].all_iter_vars) == 2
# schedule with split
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki)
assert BF.shape[0].value == 4
assert BF.shape[1] == n
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
# schedule with factor_axis
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki, 1)
assert n == BF.shape[0]
assert BF.shape[1].value == 4
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
def test_tensor_intrin():
n = 16
x = te.placeholder((n,), name="x")
y = te.placeholder((n,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
def intrin_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0].value == n
return tvm.tir.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
intrin = te.decl_tensor_intrin(z.op, intrin_func)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0].value == n
m = 32
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=n)
s[z].tensorize(xi, intrin)
assert s[z].iter_var_attrs[xi].tensor_intrin == intrin
assert s[z].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Tensorized
def test_tensor_intrin_scalar_params():
n = te.size_var("n")
x = te.placeholder((n,), name="x")
v = te.size_var("v")
w = te.size_var("w")
z = te.compute((n,), lambda i: x[i] * v + w, name="z")
def intrin_func(ins, outs, sp):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0] == n
assert sp[0] == v
assert sp[1] == w
return tvm.tir.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
intrin = te.decl_tensor_intrin(
z.op, intrin_func, scalar_params=[v, w], default_buffer_params={"offset_factor": 1}
)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0] == n
assert tuple(intrin.scalar_params) == tuple((v, w))
A = te.placeholder((10, 10), name="A")
# Pass scalar inputs to the TensorIntrin, interleaved with tensor inputs
C = te.compute((10, 10), lambda i, j: intrin(i * i, A[i, j], i + j), name="C")
s = te.create_schedule(C.op)
stmt = tvm.lower(s, [A, C])["main"].body
assert isinstance(stmt.body.body, tvm.tir.Evaluate)
assert len(stmt.body.body.value.args) == 5
assert str(stmt.body.body.value.args[3]) == "(i: int32*i)"
assert str(stmt.body.body.value.args[4]) == "(i: int32 + j: int32)"
def test_legalize_invalid_attach():
A = te.compute((10, 10), lambda i, j: 1.0, name="A")
B = te.compute((10, 10), lambda i, j: A[i][j], name="B")
# Case 1: Split an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].split(B.op.axis[1], 2)
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.For)
# Case 2: Fuse an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].fuse(B.op.axis[0], B.op.axis[1])
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt, tvm.tir.stmt.For)
def test_compute_at():
def add():
shape = (16, 16)
A = tvm.te.compute(shape, lambda *i: 1.0, name="A")
B = tvm.te.compute(shape, lambda *i: 2.0, name="B")
C = tvm.te.compute(shape, lambda *i: A(*i) + B(*i), name="C")
return A, B, C
def invalid_compute_at_self():
A, B, C = add()
s = tvm.te.create_schedule(C.op)
s[C].compute_at(s[C], C.op.axis[0])
with pytest.raises(RuntimeError):
tvm.lower(s, [A, B], simple_mode=True)
def invalid_compute_at_loop():
A, B, C = add()
s = tvm.te.create_schedule(C.op)
s[A].compute_at(s[C], C.op.axis[0])
s[C].compute_at(s[A], A.op.axis[0])
with pytest.raises(RuntimeError):
tvm.lower(s, [C], simple_mode=True)
invalid_compute_at_self()
invalid_compute_at_loop()
if __name__ == "__main__":
test_singleton()
test_pragma()
test_tensor_intrin()
test_tensor_intrin_scalar_params()
test_rfactor()
test_schedule_create()
test_reorder()
test_tile()
test_split()
test_fuse()
test_fuse_with_split()
test_fuse_with_out_of_order_axis()
test_fuse_with_out_of_order_axis_with_reorder()
test_vectorize()
test_vectorize_commreduce()
test_legalize_invalid_attach()
test_compute_at()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_bound_inference.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_bound1():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule([A2.op])
xo, xi = s[A2].split(s[A2].op.axis[0], 8)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 8
def test_bound2():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, yo, xi, yi = s[A2].tile(A2.op.axis[0], A2.op.axis[1], 8, 8)
# test normalize not affecting schedule
_ = s.normalize()
s[A1].compute_at(s[A2], yo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 8
assert bounds[A1.op.axis[1]].extent.value == 8
def test_bound3():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
s[A1].set_scope("shared")
xo, xi = s[A2].split(A2.op.axis[0], 32)
xi0, xi1 = s[A2].split(xi, nparts=16)
s[A2].bind(xi0, te.thread_axis("threadIdx.x"))
yo, yi = s[A2].split(A2.op.axis[1], 16)
# test normalize not affecting schedule
_ = s.normalize()
s[A2].reorder(xo, xi0, yo, xi1, yi)
s[A1].compute_at(s[A2], yo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 32
assert bounds[A1.op.axis[1]].extent.value == 16
def test_bound_split_ext_less_than_factor():
m = 8
I = te.placeholder((m,), name="I")
EF = te.compute((m,), lambda i: I[i] * 2, name="EF")
E = te.compute((m,), lambda i: EF[i] * 2, name="E")
s = te.create_schedule([E.op])
xo, xi = s[E].split(s[E].op.axis[0], factor=32)
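    # The split factor (32) exceeds the loop extent (8), so xi is clamped to the full extent m.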
s[EF].compute_at(s[E], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xi].extent.value == m
def test_bound_split_ext_less_than_nparts():
m = 8
I = te.placeholder((m,), name="I")
EF = te.compute((m,), lambda i: I[i] * 2, name="EF")
E = te.compute((m,), lambda i: EF[i] * 2, name="E")
s = te.create_schedule([E.op])
xo, xi = s[E].split(s[E].op.axis[0], nparts=32)
s[EF].compute_at(s[E], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent.value == m
def test_bound_split_divisible():
m = te.var("m")
l = te.var("l")
A = te.placeholder((8 * m, l), name="A")
B = te.compute((8 * m, l), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], 8)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent == m
assert bounds[xi].extent.value == 8
def test_bound_tile_divisible():
m = te.var("m")
l = te.var("l")
shape = (8 * m, 32 * l)
A = te.placeholder(shape, name="A")
B = te.compute(shape, lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], 8, 32)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[xo].extent == m
assert bounds[xi].extent.value == 8
assert bounds[yo].extent == l
assert bounds[yi].extent.value == 32
def test_bound_fusesplit1():
m = te.var("m")
l = te.var("l")
split1 = te.var("s")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1])
xo, xi = s[A2].split(fused_axes, split1)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
idxdiv = tvm.tir.indexdiv
tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[0]].min, idxdiv(xo * split1, l))
expected_extent = idxdiv((xo + 1) * split1 - 1, l) - idxdiv(xo * split1, l) + 1
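    # Verify the symbolic extent numerically for small concrete values of split1, l and xo.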
for i in range(1, 6):
for j in range(1, 6):
for k in range(1, 6):
vars = tvm.runtime.convert(
{
split1: tvm.tir.const(i, "int32"),
l: tvm.tir.const(j, "int32"),
xo.var: tvm.tir.const(k, "int32"),
}
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars),
tvm.tir.stmt_functor.substitute(expected_extent, vars),
)
tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[1]].extent, l)
def test_bound_fusesplit2():
m = te.var("m")
l = tvm.runtime.convert(6)
split = tvm.runtime.convert(3)
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1])
xo, xi = s[A2].split(fused_axes, split)
s[A1].compute_at(s[A2], xo)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
vars = tvm.runtime.convert({xo.var: tvm.tir.const(5, "int32")})
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].min, vars), 2
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].min, vars), 3
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars), 1
)
tvm.testing.assert_prim_expr_equal(
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].extent, vars), 3
)
def test_bound_warp():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
s[A1].set_scope("warp")
xo, xi = s[A2].split(A2.op.axis[0], 32)
xi0, xi1 = s[A2].split(xi, factor=16)
tx = te.thread_axis("threadIdx.x")
s[A2].bind(xi1, tx)
s[A2].bind(xi0, te.thread_axis("threadIdx.y"))
y = s[A2].op.axis[1]
s[A1].compute_at(s[A2], y)
xo, xi = s[A1].split(s[A1].op.axis[0], factor=16)
s[A1].bind(xi, tx)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[A1.op.axis[0]].extent.value == 16
def test_bound_scan():
m = te.var("m")
n = te.var("n")
X = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: X[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i])
s_scan = tvm.te.scan(s_init, s_update, s_state)
assert tuple(s_scan.shape) == (m, n)
s = te.create_schedule(s_scan.op)
XX = s.cache_read(X, "local", s_update)
xo, xi = s[s_update].split(s_update.op.axis[1], factor=4)
s[XX].compute_at(s[s_update], xo)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
assert bounds[XX.op.axis[1]].extent.value == 4
def test_bound_conv1d():
n = te.var("n")
A = te.compute((n + 2), lambda i: 1, name="A")
def computeB(ii):
i = ii + 1
return A[i - 1] + A[i] + A[i + 1]
B = te.compute(n, computeB, name="B")
s = te.create_schedule(B.op)
s[A].compute_at(s[B], B.op.axis[0])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A.op.axis[0]].extent.value == 3
def test_bound_blur():
n = tvm.runtime.convert(12)
A = te.compute((n, n), lambda i, j: 1, name="A")
def computeB(ii, jj):
# set the correct center
i = ii + 1
j = jj + 1
return A[i][j] + A[i - 1][j] + A[i + 1][j] + A[i][j + 1] + A[i][j - 1]
B = te.compute((n - 2, n - 2), computeB, name="B")
s = te.create_schedule(B.op)
s[A].compute_at(s[B], B.op.axis[1])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A.op.axis[0]].extent.value == 3
assert bounds[A.op.axis[1]].extent.value == 3
def test_bound_rfactor():
n = te.var("n")
A = te.placeholder((n,), name="A")
k = te.reduce_axis((0, n))
B = te.compute((1,), lambda i: te.sum(A[k], axis=k, where=(i > 1)), name="B")
# schedule
s = te.create_schedule(B.op)
kf, ki = s[B].split(k, nparts=4)
BF = s.rfactor(B, kf)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[BF.op.axis[0]].extent.value == 4
assert bounds[BF.op.axis[1]].extent.value == 1
def test_bound_group_schedule():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g = s.create_group(outputs=x1, inputs=x, include_inputs=True)
g.compute_at(s[x2], x2.op.axis[0])
assert s[x1].group == g
assert s[x].group == g
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[x.op.axis[0]].extent.value == 1
assert bounds[x.op.axis[1]].extent == n
def test_bound_nest_group():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1")
x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2")
s = te.create_schedule(x2.op)
g1 = s.create_group(outputs=x, inputs=x, include_inputs=True)
g2 = s.create_group(outputs=x1, inputs=x, include_inputs=True)
assert s[x].group == g1
assert s[x1].group == g2
g2.compute_at(s[x2], x2.op.axis[0])
g1.compute_at(s[x1], s[x1].op.axis[1])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[x.op.axis[0]].extent.value == 1
assert bounds[x.op.axis[1]].extent.value == 1
assert bounds[x1.op.axis[0]].extent.value == 1
assert bounds[x1.op.axis[1]].extent == n
def test_bound_nest_thread():
m = te.var("m")
A = te.placeholder((m), name="A")
A1 = te.compute((m,), lambda i: A[i], name="A1")
A2 = te.compute((m,), lambda i: A1[i] + 2, name="A2")
A3 = te.compute((m,), lambda i: A2[i] + 3, name="A3")
s = te.create_schedule(A3.op)
s[A2].set_scope("shared")
s[A1].set_scope("local")
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
bx, tx = s[A3].split(A3.op.axis[0], factor=32)
s[A3].bind(bx, block_x)
s[A3].bind(tx, thread_x)
s[A2].compute_at(s[A3], tx)
_, xi = s[A2].split(A2.op.axis[0], nparts=1)
s[A2].bind(xi, thread_x)
s[A1].compute_at(s[A3], tx)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[A1.op.axis[0]].extent.value == 1
assert bounds[A2.op.axis[0]].extent.value == 32
assert bounds[A3.op.axis[0]].extent == m
def test_gemm_bound():
nn = 1024
n = tvm.runtime.convert(nn)
A = te.placeholder((n, n), name="A")
B = te.placeholder((n, n), name="B")
k = te.reduce_axis((0, n), name="k")
C = te.compute((n, n), lambda ii, jj: te.sum(A[ii, k] * B[jj, k], axis=k), name="CC")
# schedule
s = te.create_schedule(C.op)
xtile, ytile = 32, 32
scale = 8
num_thread = 8
block_factor = scale * num_thread
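    # Each thread block computes a block_factor x block_factor (64 x 64) tile of C.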
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_y = te.thread_axis("threadIdx.y")
CC = s.cache_write(C, "local")
AA = s.cache_read(A, "shared", [CC])
BB = s.cache_read(B, "shared", [CC])
by, yi = s[C].split(C.op.axis[0], factor=block_factor)
bx, xi = s[C].split(C.op.axis[1], factor=block_factor)
s[C].reorder(by, bx, yi, xi)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
ty, yi = s[C].split(yi, nparts=num_thread)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(ty, tx, yi, xi)
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
s[CC].compute_at(s[C], tx)
s[AA].compute_at(s[CC], k)
s[BB].compute_at(s[CC], k)
ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread)
tx, xi = s[AA].split(xi, nparts=num_thread)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread)
tx, xi = s[BB].split(xi, nparts=num_thread)
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
assert bounds[BB.op.axis[0]].extent.value == 64
assert bounds[AA.op.axis[0]].extent.value == 64
assert bounds[CC.op.axis[0]].extent.value == 8
assert bounds[CC.op.axis[1]].extent.value == 8
def test_bound_tensor_compute_op():
def intrin_test():
m1 = te.var("m1")
n1 = te.var("n1")
a = te.placeholder((m1, n1), name="a")
c = te.compute((1, n1), lambda i, j: a[0, j] + a[1, j] + a[2, j], name="c")
Ab = tvm.tir.decl_buffer(a.shape, name="Abuf", offset_factor=1)
Cb = tvm.tir.decl_buffer(c.shape, name="Cbuf", offset_factor=1)
def intrin_func(ins, outs):
aa = ins[0]
cc = outs[0]
def _body():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern("int32", "test", cc.access_ptr("w"), aa.access_ptr("r"))
)
return ib.get()
return _body()
return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, c: Cb})
test_func = intrin_test()
A = te.placeholder((20, 20), name="A")
B = te.compute(A.shape, lambda i, j: A[i, j], name="B")
C = te.compute((10, 20), lambda i: test_func(B[i:10, 0:20]), name="C")
s = te.create_schedule(C.op)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
assert bounds[B.op.axis[0]].extent.value == 10
def test_bound_simplification_failure():
# Check that the bounds are not expanded
A = te.compute((2,), lambda j: j, "A")
def _check(B, A=A):
s = te.create_schedule(B.op)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.lower(s, [B, A], simple_mode=True)
if not bounds[A.op.axis[0]].extent.value <= 2:
print(stmt)
assert bounds[A.op.axis[0]].extent.value <= 2
tdiv = tvm.tir.truncdiv
    # These expressions are hard to simplify; moreover, we deliberately do not simplify them
_check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.min(-3 * i, -2 * i)]))
_check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.max(-3 * i, -4 * i)]))
_check(te.compute((10,), lambda i: A[-2 * tdiv(i, 2) - tvm.te.min(i, 0 - i)]))
_check(te.compute((10,), lambda i: A[i + (0 - i)]))
# This would cause out of bounds, but we nevertheless include it
_check(te.compute((10,), lambda i: A[i]))
if __name__ == "__main__":
test_bound_nest_thread()
test_bound1()
test_bound_nest_group()
test_bound_group_schedule()
test_bound_scan()
test_bound3()
test_bound_rfactor()
test_bound_blur()
test_bound_conv1d()
test_bound2()
test_gemm_bound()
test_bound_warp()
test_bound_tensor_compute_op()
test_bound_simplification_failure()
test_bound_fusesplit1()
test_bound_fusesplit2()
test_bound_split_divisible()
test_bound_tile_divisible()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_bound_inference_tiling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_bound_tile_mod():
def compute(M_tiles, N_tiles, factor, dtype):
# Algo
M = M_tiles * factor
N = N_tiles * factor
A = tvm.te.placeholder((N, M), name="A", dtype=dtype)
C = tvm.te.compute((N, M), lambda n, m: A[n, m], name="C")
s = tvm.te.create_schedule(C.op)
return s, A, C
def schedule(s, factor, padding, A, C):
C_local = s.cache_write(C, "local")
n, m = C.op.axis
bn, bm, ni, mi = s[C].tile(n, m, factor, factor)
nio, nii = s[C].split(ni, 2)
n = s[C].fuse(nii, mi)
C_shared = s.cache_write(C, "shared")
bn, bm, ni, mi = C_shared.op.axis
s[C_shared].storage_align(ni, factor * 2, padding)
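        # storage_align pads the row stride of the shared buffer, a common trick to avoid shared-memory bank conflicts.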
n, m = s[C].op.axis
bn, bm, ni, mi = s[C].tile(n, m, factor, factor)
s[C].set_scope("global")
niio, niii = s[C].split(ni, 32)
s[C_shared].compute_at(s[C], niio)
return s
s, A, C = compute(2, 2, 128, "float16")
s = schedule(s, 128, 8, A, C)
bounds = tvm.te.schedule.InferBound(s)
check = bounds[s.stages[2].op.axis[2]].extent == 16
if not check:
print(tvm.lower(s, [A, C], simple_mode=True))
assert check
if __name__ == "__main__":
test_bound_tile_mod()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_scan():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i], name="s_init")
x_trans = te.compute((m, n), lambda i, j: x[i, j] + 1, name="x_trans")
s_up1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + 1, name="up1")
s_update = te.compute((m, n), lambda t, i: s_up1[t, i] + x_trans[t, i], name="update")
s_scan = tvm.te.scan(s_init, s_update, s_state)
def test_getbody():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
assert set(body) == set([s_scan.op, s_update.op, s_up1.op])
def test_attach_path():
s = te.create_schedule(s_scan.op)
s[x_trans].compute_at(s[s_update], s_update.op.axis[0])
apath = tvm.te.schedule.CreateAttachPath(s)
assert tuple(apath[s_update.op]) == tuple([s_scan.op.scan_axis])
assert tuple(apath[x_trans.op]) == tuple([s_update.op.axis[0], s_scan.op.scan_axis])
def test_fix_pt():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.spatial_axis_[0]].value != 0
def test_scan_fix_point():
m = te.var("m")
n = te.var("n")
l = te.var("l")
x = te.compute((l, m, n), lambda *i: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((l, m, n))
s_init = te.compute((1, m, n), lambda _, i, j: x[0, i, j], name="s_init")
def test_scan0():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 1
def test_scan1():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, j, i], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan3_not_exact_reach():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, i, j], name="h1")
        s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, 10] * 2, name="h2")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan4_reach_other():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, j, j], name="h1")
        s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, j] * 2, name="h2")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan5_multi_output():
m = te.var("m")
n = te.var("n")
x1 = te.placeholder((m, n))
s1 = te.placeholder((m, n))
x2 = te.placeholder((m, n))
s2 = te.placeholder((m, n))
s1_init = te.compute((1, n), lambda _, i: x1[0, i])
s2_init = te.compute((1, n), lambda _, i: x2[0, i])
s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + x1[t, i])
s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
body = tvm.te.schedule.ScanGetBody(r0.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(r0.op)
assert fxpt[r1.op.spatial_axis_[0]].value == 1
test_scan0()
test_scan1()
test_scan3_not_exact_reach()
test_scan4_reach_other()
test_scan5_multi_output()
def test_create_read_graph():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j])
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3)
g = tvm.te.schedule.CreateReadGraph([A2.op])
assert g[A2.op][0] == A1
assert g[A1.op][0] == A
post_order = tvm.te.schedule.PostDFSOrder([A2.op], g)
assert post_order[0] == A.op
assert post_order[1] == A1.op
if __name__ == "__main__":
test_scan()
test_create_read_graph()
test_scan_fix_point()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_lstm_cell_inline():
num_step = 128
num_input = 256
num_hidden = 1152
batch_size = 4
# Global transition matrix
X = te.placeholder((num_step - 1, batch_size, num_input), name="X")
Wi2h = te.placeholder((4, num_hidden, num_input), name="Wi2h")
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
# h: output hidden state, c: cell state.
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
# LSTM transition
k = te.reduce_axis((0, num_input), name="ki2h")
s_i2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(X[t - 1, i, k] * Wi2h[x, j, k], axis=k),
name="s_i2h",
)
    k = te.reduce_axis((0, num_hidden), name="kh2h")
s_h2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
# Gate rules
gates = te.compute(s_i2h.shape, lambda *i: s_i2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 0, i, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, 1, i, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, 2, i, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 3, i, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
    # Construct the scan that carries (h, c) across time steps
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[X],
name="lstm_scan",
)
# schedule
s = te.create_schedule(scan_h.op)
# Inline gate computations
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
# verify we can lower correctly
tvm.lower(s, [X, Wi2h, Wh2h, scan_h, scan_c])
if __name__ == "__main__":
test_lstm_cell_inline()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm.driver.build_module import schedule_to_module
def test_const():
x = tvm.te.const(1, "int32")
assert x.dtype == "int32"
assert isinstance(x, tvm.tir.IntImm)
def test_schedule0():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
s = te.create_schedule(A1.op)
mod = schedule_to_module(s, [A, A1])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule1():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
s = te.create_schedule(A1.op)
xo, xi = s[A1].split(A1.op.axis[0], 8)
s[A1].pragma(xo, "auto_unroll_max_step", 10)
mod = schedule_to_module(s, [A, A1])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule2():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
s = te.create_schedule(A2.op)
xo, xi = s[A2].split(A2.op.axis[0], 8)
s[A1].compute_at(s[A2], xo)
mod = schedule_to_module(s, [A, A2])
assert isinstance(mod["main"], tvm.tir.PrimFunc)
def test_schedule_scan():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i])
s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i])
res = tvm.te.scan(s_init, s_update, s_state)
assert tuple(res.shape) == (m, n)
s = te.create_schedule(res.op)
s = s.normalize()
ir = tvm.lower(s, [s_state], simple_mode=True)
bounds = tvm.te.schedule.InferBound(s)
assert bounds[res.op.scan_axis].min.value == 1
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_inline_multi_reduce():
def argmax_comp(x, y):
idx = tvm.tir.Select((x[1] >= y[1]), x[0], y[0])
val = tvm.tir.Select((x[1] >= y[1]), x[1], y[1])
return idx, val
def argmax_init(idx_typ, val_typ):
return tvm.tir.const(-1, idx_typ), tvm.te.min_value(val_typ)
argmax = te.comm_reducer(argmax_comp, argmax_init, name="argmax")
m = te.var("m")
n = te.var("n")
val = te.placeholder((m, n), name="val", dtype="float32")
val1 = te.compute((m, n), lambda i, j: val[i, j] + 1, name="val1")
val2 = te.compute((m, n), lambda i, j: te.exp(val1[i, j]), name="val2")
k = te.reduce_axis((0, n), "k")
T_idx, T_val = te.compute((m,), lambda i: argmax((k.var, val2[i, k]), axis=k), name="T")
s = te.create_schedule(T_idx.op)
s[val1].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_auto_inline():
def elemwise():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="T1")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def broadcast():
m = te.var("m")
n = te.var("n")
A = te.placeholder((1,), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(0) * B(i, j), name="T1", tag="broadcast")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def injective():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m,), name="A")
B = te.placeholder((m, n), name="B")
C = te.placeholder((m, n), name="C")
T1 = te.compute((m, n), lambda i, j: A(i) * B(i, j), name="T1")
T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2")
return te.create_schedule(T2.op), T1
def check_auto_inline(schedule_func, auto_inline_func):
s, T1 = schedule_func()
        # before auto-inline, the attach type is AttachType.kGroupRoot (== 1)
assert s[T1].attach_type == 1
auto_inline_func(s)
        # after auto-inline, the attach type is AttachType.kInline (== 2)
assert s[T1].attach_type == 2
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
check_auto_inline(elemwise, tvm.te.schedule.AutoInlineElemWise)
check_auto_inline(broadcast, tvm.te.schedule.AutoInlineBroadcast)
check_auto_inline(injective, tvm.te.schedule.AutoInlineInjective)
def test_schedule_const_bound():
n = 128
A = te.placeholder((n,), name="A")
A1 = te.compute((n,), lambda i: A[i] + 1, name="A1")
s = te.create_schedule(A1.op)
xo, xi = s[A1].split(A1.op.axis[0], 8)
bounds = tvm.te.schedule.InferBound(s)
assert isinstance(bounds, tvm.container.Map)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_inline_mixed():
n = te.var("n")
A = te.placeholder((n,), name="A")
A1 = te.compute(A.shape, lambda *i: A(*i) + 1, name="A1")
A2 = te.compute(A.shape, lambda *i: A1(*i) + 2, name="A2")
C = te.compute((n,), lambda i: A2[i] + A1[i], name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=8)
s[A1].compute_at(s[C], xo)
s[A2].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def check(x):
if isinstance(x, tvm.tir.Call):
assert x.func != A2
tvm.tir.stmt_functor.post_order_visit(s[C].op.body[0], check)
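# Note on test_inline_mixed above: after s[A2].compute_inline(), the body of C reads
# A1 directly (effectively C[i] = (A1[i] + 2) + A1[i]), so the visitor asserts that no
# call to A2 survives in the scheduled op body.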
def test_scan_inline1():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state1 = te.placeholder((m, n))
s_state2 = te.placeholder((m, n))
s_init1 = te.compute((1, n), lambda _, i: x[0, i])
s_init2 = te.compute((1, n), lambda _, i: x[0, i])
s_x1 = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="x1")
s_x2 = te.compute((m, n), lambda t, i: s_state2[t - 1, i] + 1, name="x2")
s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1")
s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2")
res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2])
s = te.create_schedule(res1.op)
s[s_x1].compute_inline()
stmt = tvm.lower(s, [x, res1, res2])
def test_scan_inline2():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state1 = te.placeholder((m, n))
s_state2 = te.placeholder((m, n))
s_init1 = te.compute((1, n), lambda _, i: x[0, i])
s_init2 = te.compute((1, n), lambda _, i: x[0, i])
s_xx = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="xx")
s_x1 = te.compute((m, n), lambda t, i: s_xx[t, i] + 1, name="x1")
s_x2 = te.compute((m, n), lambda t, i: s_xx[t, i] + s_state2[t - 1, 2], name="x2")
s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1")
s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2")
res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2])
s = te.create_schedule(res1.op)
s[s_xx].compute_inline()
s[s_x1].compute_inline()
s[s_x2].compute_inline()
stmt = tvm.lower(s, [x, res1, res2])
def test_schedule_cache():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
AA = s.cache_read(A, "shared", readers=[C])
CC = s.cache_write(C, "shared")
s[AA].compute_at(s[CC], CC.op.axis[0])
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
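# A minimal, self-contained sketch for test_schedule_cache above (an illustrative
# assumption, not part of the original test): cache_read/cache_write insert copy
# stages whose scopes show up as extra allocations in the lowered TIR.
def _cache_stage_sketch():
    n = 64
    A = te.placeholder((n,), name="A")
    B = te.compute((n,), lambda i: A[i] + 1, name="B")
    s = te.create_schedule(B.op)
    AA = s.cache_read(A, "local", [B])  # copies A into a "local"-scope stage
    BB = s.cache_write(B, "local")  # computes into "local", then copies out to B
    return tvm.lower(s, [A, B], simple_mode=True)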
def test_schedule_middle_cache():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
D = te.compute((m, n), lambda i, j: C(i, j), name="D")
s = te.create_schedule(D.op)
AA = s.cache_read(A, "local", readers=[C])
BB = s.cache_read(B, "local", readers=[C])
CC = s.cache_read(C, "local", readers=[D])
DD = s.cache_write(D, "local")
# s[AA].compute_at(s[CC], CC.op.axis[0])
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout1():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
s[C].reorder(C.op.axis[1], C.op.axis[0])
CC = s.cache_write(C, "global")
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout2():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
C = te.compute(A.shape, lambda i, j: A(i, j) * B(i, j), name="C")
s = te.create_schedule(C.op)
x, y = C.op.axis
xo, xi = s[C].split(x, factor=4)
s[C].reorder(xo, y, xi)
CC = s.cache_write(C, "global")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout3():
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
k = te.reduce_axis((0, n), "k")
C = te.compute((A.shape[0],), lambda i: te.sum(A(i, k) * B(i, k), axis=k), name="C")
s = te.create_schedule(C.op)
x = C.op.axis[0]
xo, xi = s[C].split(x, factor=4)
CC = s.cache_write(C, "global")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_cache_relayout4():
def _compute(*indice):
return A(*indice) + 1, B(*indice) / 2
m = te.var("m")
n = te.var("n")
A = te.placeholder((m * 4, n), name="A")
B = te.placeholder((m * 4, n), name="B")
C1, C2 = te.compute(A.shape, _compute, name="C")
s = te.create_schedule([C1.op, C2.op])
C1_cache, C2_cache = s.cache_write([C1, C2], "local")
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def intrin_gemv(m, n):
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemm", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
reset = tvm.tir.call_packed("fill_zero", zz_ptr, n)
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, reset, update
buffer_params = {"data_alignment": 16, "offset_factor": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
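# Note on intrin_gemv above: returning the (body, reset, update) triple from
# intrin_func lets a tensorized reduction emit "fill_zero" for initialization and
# "gemv_add" for the accumulation steps instead of the plain body, following
# te.decl_tensor_intrin's convention for reduction intrinsics.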
def test_schedule_tensor_compute1():
# basic: split, reorder, tile
M, N, L = 2048, 1024, 512
factor, rfactor = 16, 16
A = te.placeholder((N // factor, L // rfactor, factor, rfactor), name="A")
B = te.placeholder((M, L // rfactor, rfactor), name="B")
k = te.reduce_axis((0, L // rfactor), name="k")
gemv = intrin_gemv(factor, rfactor)
C = te.compute(
(N, M // factor, factor),
lambda i, j: gemv(A[i, k, 0:factor, 0:factor], B[j, k, 0:rfactor], reduce_axis=k),
name="C",
)
s = te.create_schedule(C.op)
ai, aj, ax = s[C].op.axis
aio, aii = s[C].split(ai, 16)
s[C].reorder(aio, aj, aii)
aioo, ajo, aioi, aji = s[C].tile(aio, aj, 16, 4)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def intrin_vadd(n, cache_read=False, cache_write=False):
scope_ubuf = "local"
dtype = "float32"
x = te.placeholder((n,), dtype=dtype, name="vx")
y = te.placeholder((n,), dtype=dtype, name="vy")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
def create_buffer(t):
return tvm.tir.decl_buffer(
t.shape, t.dtype, name="W" + t.name, scope=scope_ubuf, offset_factor=16
)
binds = {}
if cache_read:
binds[x] = create_buffer(x)
binds[y] = create_buffer(y)
if cache_write:
binds[z] = create_buffer(z)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
outs[0].dtype,
"vadd",
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(
z.op, intrin_func, binds=binds, default_buffer_params={"offset_factor": 16}
)
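# Usage sketch for intrin_vadd above (hypothetical values): intrin_vadd(16, True, True)
# declares a 16-wide vector add whose inputs and output are bound to "local"-scope
# buffers, so the cache stages created in test_schedule_tensor_compute2 below can be
# matched against it during scheduling.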
def test_schedule_tensor_compute2():
# cache_read, cache_write
M = 1024
factor = 16
dtype = "float32"
scope_ubuf = "local"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
vadd = intrin_vadd(factor, True, True)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]), name="C")
s = te.create_schedule(C.op)
AL = s.cache_read(A, scope_ubuf, C)
BL = s.cache_read(B, scope_ubuf, C)
CL = s.cache_write(C, scope_ubuf)
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_schedule_tensor_compute3():
# compute_at
M = 1024
factor = 16
dtype = "float32"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
Bi = te.compute((M // factor, factor), lambda i, j: B[i, j] + 5, name="Bi")
vadd = intrin_vadd(factor)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], Bi[i, 0:factor]), name="C")
s = te.create_schedule(C.op)
s[Bi].compute_at(s[C], C.op.axis[0])
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_loop_dep_reduce():
X = te.placeholder(shape=(10,), name="x")
def f(n):
rv = te.reduce_axis((0, n))
return te.sum(X[rv], axis=rv)
Y = te.compute(X.shape, f, name="y")
s = te.create_schedule([Y.op])
f = tvm.build(s, [X, Y])
def test_loop_dep_reduce_cache_write():
X = te.placeholder(shape=(10,), name="x")
def f(n):
rv = te.reduce_axis((0, n))
init = lambda dtype: tvm.tir.Select(n > 1, tvm.tir.const(0, dtype), n.astype(dtype))
sum = te.comm_reducer(lambda x, y: tvm.te.max(x + y, n.astype("float32")), init, name="sum")
return sum(X[rv], axis=rv)
Y = te.compute(X.shape, f, name="y")
s = te.create_schedule([Y.op])
s.cache_write(Y, "local")
f = tvm.build(s, [X, Y])
def test_reduction_and_dummy_fuse_split():
n = 10
X = te.placeholder(shape=(n,), dtype="int32", name="X")
k = te.reduce_axis((0, n))
Y = te.compute((), lambda: te.sum(X[k], k), name="Y")
s = te.create_schedule([Y.op])
ax = s[Y.op].fuse(*Y.op.axis)
axo, axi = s[Y.op].split(ax, nparts=20)
f = tvm.build(s, [Y, X])
args = [tvm.nd.empty((), "int32")] + [tvm.nd.array(np.ones((n,), dtype="int32"))]
f(*args)
assert args[0].numpy() == n
n = 10
X = te.placeholder(shape=(n,), dtype="int32", name="X")
k = te.reduce_axis((0, n))
Y = te.compute((n,), lambda i: te.sum(X[k], k), name="Y")
s = te.create_schedule([Y.op])
ax = s[Y.op].fuse(*(list(Y.op.axis) + list(Y.op.reduce_axis)))
f = tvm.build(s, [Y, X])
args = [tvm.nd.array(np.ones((n,), dtype="int32"))] + [
tvm.nd.array(np.ones((n,), dtype="int32"))
]
f(*args)
assert np.all(args[0].numpy() == n)
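# Note on test_reduction_and_dummy_fuse_split above: both variants reduce a vector of
# ones of length n, so every output element equals n; the first writes a rank-0
# tensor, while the second fuses the spatial axis with the reduction axis before
# building.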
def test_schedule_compute_inline():
shape = [10, 1024]
A = te.placeholder(shape, name="A")
B = te.placeholder(shape, name="B")
C = te.compute(shape, lambda *index: A(*index) + B(*index), name="C")
def _compute(*index):
return C(*index), C(*index) * B(*index)
F, E = te.compute(shape, _compute, name="F")
s = te.create_schedule([F.op, E.op])
AL = s.cache_read(A, "local", [C])
BL = s.cache_read(B, "local", [C, E])
CL = s.cache_write(C, "local")
FL, EL = s.cache_write([F, E], "local")
s[C].compute_inline()
s = s.normalize()
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def test_local_stage_predicate():
m = 1
n = 3
p = 2
A = tvm.te.placeholder((m, n, p), name="A")
B = tvm.te.compute((m, n, p), lambda bi, bj, bk: A[bi, bj, bk], name="B")
C = tvm.te.compute((m, n, p), lambda ci, cj, ck: B[ci, cj, ck], name="C")
by = tvm.te.thread_axis("blockIdx.y")
tx = tvm.te.thread_axis("threadIdx.x")
vx = tvm.te.thread_axis("vthread")
def schedule(thread_tag, mem_scope):
s = tvm.te.create_schedule(C.op)
s[B].compute_at(s[C], s[C].op.axis[0])
s[B].set_scope(mem_scope)
bno, bni = s[B].split(s[B].op.axis[1], n)
bx = tvm.te.thread_axis("blockIdx.x")
s[C].bind(s[C].op.axis[0], bx)
s[C].bind(s[C].op.axis[1], thread_tag)
s[B].bind(bni, thread_tag)
return s
def collect_visit(stmt, f):
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x)))
return ret
# local vs. threadIdx
s = schedule(tx, "local")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
# local vs. vthread
s = schedule(vx, "local")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
# shared vs. blockIdx
s = schedule(by, "shared")
lowered_body = tvm.lower(s, [A, C])["main"].body
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_local_stage_predicate2():
A = tvm.te.placeholder((128,), name="A")
B = tvm.te.compute((128,), lambda bi: A[bi] + 1, name="B")
C = tvm.te.compute((128,), lambda ci: B[ci] + 2, name="C")
s = tvm.te.create_schedule(C.op)
AA = s.cache_read(A, "local", [B])
s[B].set_scope("shared")
block_x = tvm.te.thread_axis("blockIdx.x")
thread_x = tvm.te.thread_axis((0, 32), "threadIdx.x")
oc, ic = s[C].split(s[C].op.axis[0], factor=64)
ooc, ioc = s[C].split(oc, factor=2)
oic, iic = s[C].split(ic, factor=32)
s[C].bind(ooc, block_x)
s[C].bind(iic, thread_x)
s[B].compute_at(s[C], ioc)
ob, ib = s[B].split(s[B].op.axis[0], factor=32)
s[B].bind(ib, thread_x)
s[AA].compute_root()
s[AA].compute_at(s[C], ooc)
oaa, iaa = s[AA].split(s[AA].op.axis[0], factor=32)
s[AA].bind(iaa, thread_x)
lowered_body = tvm.lower(s, [A, C])["main"].body
def collect_visit(stmt, f):
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x)))
return ret
def visit_stmt(op):
if isinstance(op, tvm.tir.Allocate):
return op.extents[0].value == 97
return False
assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse)))
assert any(collect_visit(lowered_body, visit_stmt))
if __name__ == "__main__":
test_loop_dep_reduce()
test_loop_dep_reduce_cache_write()
test_schedule_middle_cache()
test_inline_multi_reduce()
test_schedule_cache_relayout4()
test_schedule_cache_relayout3()
test_schedule_cache_relayout2()
test_schedule_cache_relayout1()
test_schedule_const_bound()
test_scan_inline1()
test_scan_inline2()
test_inline_mixed()
test_auto_inline()
test_schedule_scan()
test_schedule0()
test_schedule1()
test_schedule2()
test_schedule_cache()
test_schedule_tensor_compute1()
test_schedule_tensor_compute2()
test_schedule_tensor_compute3()
test_reduction_and_dummy_fuse_split()
test_schedule_compute_inline()
test_local_stage_predicate()
test_local_stage_predicate2()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
import tvm.testing
def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96):
A = te.placeholder((n, l), name="A", dtype="float16")
B = te.placeholder((l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m), lambda i, j: te.sum(A[i, k].astype("float32") * B[k, j].astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 4
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[0], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[0], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(l, m)).astype(B.dtype)
c_np = np.zeros((n, m), dtype=np.float32)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3))
c_np = np.dot(a_np, b_np)
np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
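# Note on tensor_core_matmul above: WX = min(warp_tile_m, tile_x) selects the warp
# fragment width, so warp_tile_m in {16, 8, 32} exercises the 16x16x16, 8x32x16, and
# 32x8x16 WMMA shapes; the "tensor_core" pragma on ko asks the TensorCore rewrite
# pass to map the schedule onto WMMA intrinsics.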
def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2):
A = te.placeholder((batch, n, l), name="A", dtype="float16")
B = te.placeholder((batch, l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
)
s = te.create_schedule(C.op)
z, y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CL = s.cache_write(C, "local")
bx = 2
by = 32
step_k = 8
v = 4
TX = 8
TY = 1
tile_x = bx * TX
tile_y = by * TY
WX = min(warp_tile_m, tile_x)
tile_k = 16
vthread = 1
yo, ty = s[C].split(y, tile_y * vthread)
vy, ty = s[C].split(ty, tile_y)
ty, yi = s[C].split(ty, TY)
xo, xi = s[C].split(x, tile_x)
tz, xi = s[C].split(xi, WX)
tx, xi = s[C].split(xi, TX)
ko, ki = s[CL].split(k, step_k * tile_k)
kl, ki = s[CL].split(ki, tile_k)
s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
s[C].bind(z, te.thread_axis("blockIdx.z"))
s[C].bind(yo, te.thread_axis("blockIdx.y"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
s[C].bind(ty, te.thread_axis("threadIdx.y"))
s[C].bind(tz, te.thread_axis("threadIdx.z"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))
s[CL].compute_at(s[C], tx)
zo, yo, xo = CL.op.axis
s[CL].reorder(ko, kl, ki, zo, yo, xo)
s[AA].compute_at(s[CL], ko)
xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
tx, vec = s[AA].split(tx, factor=v)
fused = s[AA].fuse(s[AA].op.axis[1], xo)
_, ty = s[AA].split(fused, factor=by)
s[AA].bind(ty, te.thread_axis("threadIdx.y"))
s[AA].bind(tz, te.thread_axis("threadIdx.z"))
s[AA].bind(tx, te.thread_axis("threadIdx.x"))
s[AA].vectorize(vec)
s[BB].compute_at(s[CL], ko)
xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
tx, vec = s[BB].split(tx, factor=v)
fused = s[BB].fuse(s[BB].op.axis[1], xo)
_, ty = s[BB].split(fused, factor=by)
s[BB].bind(ty, te.thread_axis("threadIdx.y"))
s[BB].bind(tz, te.thread_axis("threadIdx.z"))
s[BB].bind(tx, te.thread_axis("threadIdx.x"))
s[BB].vectorize(vec)
s[AL].compute_at(s[CL], kl)
s[BL].compute_at(s[CL], kl)
s[CL].pragma(ko, "tensor_core")
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
c_np = np.zeros((batch, n, m), dtype=np.float32)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print(
"batch gemm m=%d n=%d k=%d batch=%d: %f ms"
% (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
)
for bs in range(batch):
c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)
@tvm.testing.requires_tensorcore
def test_tensor_core_matmul():
tensor_core_matmul(16) # test with warp_tile 16x16x16
tensor_core_matmul(8) # test with warp_tile 8x32x16
tensor_core_matmul(32) # test with warp_tile 32x8x16
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
tensor_core_batch_matmul()
if __name__ == "__main__":
test_tensor_core_matmul()
test_tensor_core_batch_matmul()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_tensor_core.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm.topi.testing import conv2d_nhwc_python
import tvm.testing
VERIFY = True
def intrin_wmma_load_matrix(shape, scope):
n, m, l = shape
if scope == "wmma.matrix_a":
row, col = n, l
elif scope == "wmma.matrix_b":
row, col = l, m
A = te.placeholder((row, col), name="A", dtype="float16")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=row * col
)
C = te.compute((row, col), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=row * col
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_load_matrix_sync",
BC.data,
n,
m,
l,
BC.elem_offset // (row * col),
BA.access_ptr("r"),
col,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
def intrin_wmma_gemm(shape):
n, m, l = shape
A = te.placeholder((n, l), name="A", dtype="float16")
B = te.placeholder((l, m), name="B", dtype="float16")
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, jj].astype("float"), axis=k),
name="C",
)
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=n * l
)
BB = tvm.tir.decl_buffer(
B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=l * m
)
BC = tvm.tir.decl_buffer(
C.shape,
C.dtype,
name="BC",
scope="wmma.accumulator",
data_alignment=32,
offset_factor=n * m,
)
def intrin_func(ins, outs):
BA, BB = ins
(BC,) = outs
def init():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_fill_fragment",
BC.data,
n,
m,
l,
BC.elem_offset // (n * m),
0.0,
)
)
return ib.get()
def update():
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_mma_sync",
BC.data,
BC.elem_offset // (n * m),
BA.data,
BA.elem_offset // (n * l),
BB.data,
BB.elem_offset // (l * m),
BC.data,
BC.elem_offset // (n * m),
)
)
return ib.get()
return update(), init(), update()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
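# Note on intrin_wmma_gemm above: the (update(), init(), update()) triple maps to
# (body, reset, update) -- the accumulator fragment is zero-filled via
# tir.tvm_fill_fragment on reset and accumulated via tir.tvm_mma_sync otherwise, so
# body and update share the same mma_sync sequence.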
def intrin_wmma_store_matrix(shape):
n, m, l = shape
A = te.placeholder((n, m), name="A", dtype="float32")
BA = tvm.tir.decl_buffer(
A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=n * m
)
C = te.compute((n, m), lambda i, j: A[i, j], name="C")
BC = tvm.tir.decl_buffer(
C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=n * m
)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
BA = ins[0]
BC = outs[0]
ib.emit(
tvm.tir.call_intrin(
"handle",
"tir.tvm_store_matrix_sync",
BA.data,
n,
m,
l,
BA.elem_offset // (n * m),
BC.access_ptr("w"),
m,
"row_major",
)
)
return ib.get()
return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
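# Together the three intrinsics above cover the WMMA pipeline used below: load A/B
# tiles from shared memory into matrix fragments, run the fragment GEMM into an
# accumulator, and store the accumulator tile back to global memory.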
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
batch_size = 4
n = 512
m, l = n, n
assert n % 32 == 0
assert m % 8 == 0
assert l % 16 == 0
nn, mm, ll = n // 32, m // 8, l // 16
A = te.placeholder((batch_size, nn, ll, 32, 16), name="A", dtype="float16")
B = te.placeholder((batch_size, ll, mm, 16, 8), name="B", dtype="float16")
k1 = te.reduce_axis((0, ll), name="k1")
k2 = te.reduce_axis((0, 16), name="k2")
C = te.compute(
(batch_size, nn, mm, 32, 8),
lambda b, i, j, ii, jj: te.sum(
A[b, i, k1, ii, k2].astype("float") * B[b, k1, j, k2, jj].astype("float"), axis=[k1, k2]
),
name="Fragment_C",
)
s = te.create_schedule(C.op)
warp_size = 32
kernel_size = 16
block_row_warps = 2
block_col_warps = 4
warp_row_tiles = 4
warp_col_tiles = 2
chunk = 4
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
AS = s.cache_read(A, "shared", [C])
BS = s.cache_read(B, "shared", [C])
AF = s.cache_read(AS, "wmma.matrix_a", [C])
BF = s.cache_read(BS, "wmma.matrix_b", [C])
CF = s.cache_write(C, "wmma.accumulator")
b, i, j, kernel_i, kernel_j = s[C].op.axis
i, ii = s[C].split(i, factor=warp_row_tiles)
block_i, i = s[C].split(i, factor=block_row_warps)
j, jj = s[C].split(j, factor=warp_col_tiles)
block_j, j = s[C].split(j, factor=block_col_warps)
s[C].reorder(block_i, block_j, i, j, ii, jj, kernel_i, kernel_j)
s[C].bind(b, block_z)
s[C].bind(block_i, block_x)
s[C].bind(block_j, block_y)
s[C].bind(i, thread_y)
s[C].bind(j, thread_z)
s[CF].compute_at(s[C], j)
b, warp_i, warp_j, _i, _j = s[CF].op.axis
k, _k = CF.op.reduce_axis
ko, ki = s[CF].split(k, factor=chunk)
s[CF].reorder(ko, ki, warp_i, warp_j, _i, _j, _k)
s[AF].compute_at(s[CF], ki)
s[BF].compute_at(s[CF], ki)
s[AS].compute_at(s[CF], ko)
b, xo, yo, xi, yi = AS.op.axis
tx, xo = s[AS].split(xo, nparts=block_row_warps)
ty, yo = s[AS].split(yo, nparts=block_col_warps)
t = s[AS].fuse(xi, yi)
to, ti = s[AS].split(t, nparts=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(to, thread_x)
s[BS].compute_at(s[CF], ko)
b, xo, yo, xi, yi = BS.op.axis
tx, xo = s[BS].split(xo, nparts=block_row_warps)
ty, yo = s[BS].split(yo, nparts=block_col_warps)
t = s[BS].fuse(xi, yi)
to, ti = s[BS].split(t, nparts=warp_size)
s[BS].bind(tx, thread_y)
s[BS].bind(ty, thread_z)
s[BS].bind(to, thread_x)
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_a"))
s[BF].tensorize(BF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_b"))
s[C].tensorize(kernel_i, intrin_wmma_store_matrix((32, 8, 16)))
s[CF].tensorize(_i, intrin_wmma_gemm((32, 8, 16)))
func = tvm.build(s, [A, B, C], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=(batch_size, nn, ll, 32, 16)).astype(A.dtype)
b_np = np.random.uniform(size=(batch_size, ll, mm, 16, 8)).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((batch_size, nn, mm, 32, 8), dtype=C.dtype), dev)
func(a, b, c)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("gemm with tensor core: %f ms" % (evaluator(a, b, c).mean * 1e3))
if VERIFY:
func(a, b, c)
a_np = a_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
b_np = b_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
c_np = c.numpy().transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n)
np.testing.assert_allclose(
c_np, np.matmul(a_np.astype(C.dtype), b_np.astype(C.dtype)), rtol=1e-4, atol=1e-4
)
@tvm.testing.requires_tensorcore
def test_tensor_core_batch_conv():
# The sizes of inputs and filters
batch_size = 32
height = 14
width = 14
in_channels = 32
out_channels = 64
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
block_size = 16
block_row_warps = 2
block_col_warps = 4
warp_row_tiles = 4
warp_col_tiles = 2
warp_size = 32
chunk = 2
# Input feature map: (N, H, W, IC, n, ic)
data_shape = (
batch_size // block_size,
height,
width,
in_channels // block_size,
block_size,
block_size,
)
# Kernel: (H, W, IC, OC, ic, oc)
kernel_shape = (
kernel_h,
kernel_w,
in_channels // block_size,
out_channels // block_size,
block_size,
block_size,
)
# Output feature map: (N, H, W, OC, n, oc)
output_shape = (
batch_size // block_size,
height,
width,
out_channels // block_size,
block_size,
block_size,
)
assert batch_size % block_size == 0
assert in_channels % block_size == 0
assert out_channels % block_size == 0
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // block_size), name="ic")
ii = te.reduce_axis((0, block_size), name="ii")
# Algorithm
A = te.placeholder(data_shape, name="A", dtype="float16")
W = te.placeholder(kernel_shape, name="W", dtype="float16")
Apad = te.compute(
(
batch_size // block_size,
height + 2 * pad_h,
width + 2 * pad_w,
in_channels // block_size,
block_size,
block_size,
),
lambda n, h, w, i, nn, ii: tvm.tir.if_then_else(
tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width),
A[n, h - pad_h, w - pad_w, i, nn, ii],
tvm.tir.const(0.0, "float16"),
),
name="Apad",
)
Conv = te.compute(
output_shape,
lambda n, h, w, o, nn, oo: te.sum(
Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32")
* W[kh, kw, ic, o, ii, oo].astype("float32"),
axis=[ic, kh, kw, ii],
),
name="Conv",
)
s = te.create_schedule(Conv.op)
s[Apad].compute_inline()
AS = s.cache_read(Apad, "shared", [Conv])
WS = s.cache_read(W, "shared", [Conv])
AF = s.cache_read(AS, "wmma.matrix_a", [Conv])
WF = s.cache_read(WS, "wmma.matrix_b", [Conv])
ConvF = s.cache_write(Conv, "wmma.accumulator")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
nc, hc, wc, oc, nnc, ooc = Conv.op.axis
block_k = s[Conv].fuse(hc, wc)
s[Conv].bind(block_k, block_z)
nc, nci = s[Conv].split(nc, factor=warp_row_tiles)
block_i, nc = s[Conv].split(nc, factor=block_row_warps)
oc, oci = s[Conv].split(oc, factor=warp_col_tiles)
block_j, oc = s[Conv].split(oc, factor=block_col_warps)
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
s[Conv].bind(block_i, block_x)
s[Conv].bind(block_j, block_y)
s[Conv].bind(nc, thread_y)
s[Conv].bind(oc, thread_z)
s[ConvF].compute_at(s[Conv], oc)
n, h, w, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
s[WS].compute_at(s[ConvF], kh)
s[AS].compute_at(s[ConvF], kh)
n, h, w, i, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, yo = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, factor=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(ti, thread_x)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, yo = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_a"))
s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_b"))
s[Conv].tensorize(nnc, intrin_wmma_store_matrix((16, 16, 16)))
s[ConvF].tensorize(nnf, intrin_wmma_gemm((16, 16, 16)))
func = tvm.build(s, [A, W, Conv], "cuda")
dev = tvm.cuda(0)
a_np = np.random.uniform(size=data_shape).astype(A.dtype)
w_np = np.random.uniform(size=kernel_shape).astype(W.dtype)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), dev)
evaluator = func.time_evaluator(func.entry_name, dev, number=3)
print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3))
if VERIFY:
func(a, w, c)
a_np = a_np.transpose(0, 4, 1, 2, 3, 5).reshape(batch_size, height, width, in_channels)
w_np = w_np.transpose(0, 1, 2, 4, 3, 5).reshape(
kernel_h, kernel_w, in_channels, out_channels
)
c_np = (
c.numpy().transpose((0, 4, 1, 2, 3, 5)).reshape(batch_size, height, width, out_channels)
)
c_std = conv2d_nhwc_python(
a_np.astype(Conv.dtype), w_np.astype(Conv.dtype), (stride_h, stride_w), (pad_h, pad_w)
).astype(Conv.dtype)
np.testing.assert_allclose(c_np, c_std, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
    test_tensor_core_batch_matmul()
test_tensor_core_batch_conv()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_schedule_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def intrin_vadd(xo, m, n):
x = te.placeholder((n,), name="vx")
y = te.placeholder((n,), name="vy")
if m % n == 0:
body = lambda i: x[i] + y[i]
else:
body = lambda i: tvm.tir.Select(
xo * n + i < m, x[i] + y[i], tvm.tir.const(0, dtype=x.dtype)
)
z = te.compute(x.shape, body, name="z")
def intrin_func(ins, outs):
xx, yy = ins
zz = outs[0]
        # Special handling is needed for the tail loop when m % n != 0;
        # the valid extent there is tvm.min(n, m - xo * n).
return tvm.tir.call_packed("vadd", xx, yy, zz)
buffer_params = {"offset_factor": 16}
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params=buffer_params)
def intrin_gemv(m, n):
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
reset = tvm.tir.call_packed("fill_zero", zz_ptr, n)
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, reset, update
buffer_params = {"offset_factor": 16, "data_alignment": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
def intrin_gemv_no_reset(m, n):
w = te.placeholder((m, n), name="w")
x = te.placeholder((n,), name="x")
k = te.reduce_axis((0, n), name="k")
z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
Wb = tvm.tir.decl_buffer(
w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
)
def intrin_func(ins, outs):
ww, xx = ins
zz = outs[0]
ww_ptr = ww.access_ptr("r")
xx_ptr = xx.access_ptr("r")
zz_ptr = zz.access_ptr("w")
body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
return body, None, update
buffer_params = {"offset_factor": 16, "data_alignment": 16}
return te.decl_tensor_intrin(
z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
)
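# Note: intrin_gemv_no_reset is identical to intrin_gemv except that it returns None
# for the reset hook, which lets the rfactor tests below exercise tensorization of
# reductions that have no explicit initializer.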
def test_tensorize_vadd():
def add(m):
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
return x, y, z
def check(m, factor):
x, y, z = add(m)
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=factor)
vadd = intrin_vadd(xo, m, factor)
s[z].tensorize(xi, vadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[z], dom_map)
assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].extent, factor)
assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].min, xo * factor)
assert tvm.ir.structural_equal(in_dom.items()[0][1][0].extent, factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[z], out_dom, in_dom, vadd)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(vadd.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [x, y, z])
def check_cache_write(m, factor):
x, y, z = add(m)
s = te.create_schedule(z.op)
_, _ = s[z].split(z.op.axis[0], factor=factor)
z_global = s.cache_write(z, "global")
xo, xi = z_global.op.axis
vadd = intrin_vadd(xo, m, factor)
s[z_global].tensorize(xi, vadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[z_global], dom_map)
# outer loop var will be rebased, so min value is the new loop var and extent is 1
assert tvm.ir.structural_equal(out_dom[xo].extent, 1)
assert isinstance(out_dom[xo].min, tvm.tir.Var)
assert xo.var.name == out_dom[xo].min.name
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[z_global], out_dom, in_dom, vadd)[0]
ana = tvm.arith.Analyzer()
vars = tvm.runtime.convert({xo.var: out_dom[xo].min})
vadd_body = tvm.tir.stmt_functor.substitute(vadd.op.body[0], vars)
assert tvm.ir.structural_equal(ana.simplify(body), ana.simplify(vadd_body))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [x, y, z])
def check_compute_reuse():
x, y, z = add(32)
def _intrin_vadd():
def _intrin_func(ins, outs):
return tvm.tir.call_packed("vadd", ins[0], ins[1], outs[0])
return tvm.te.decl_tensor_intrin(z.op, _intrin_func)
s = tvm.te.create_schedule(z.op)
s[z].tensorize(z.op.axis[0], _intrin_vadd())
tvm.lower(s, [x, y, z])
check(128, 16)
check_cache_write(129, 16)
check_compute_reuse()
def test_tensorize_matmul():
n = 1024
m = n
l = n
A = te.placeholder((n, l), name="A")
B = te.placeholder((m, l), name="B")
k = te.reduce_axis((0, l), name="k")
C = te.compute((n, m), lambda i, j: te.sum(B[j, k] * A[i, k], axis=k), name="C")
def check(factor):
s = te.create_schedule(C.op)
x, y = C.op.axis
yo, yi = s[C].split(y, factor=factor)
gemv = intrin_gemv(factor, l)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
s[C].reorder(yo, ro, yi, ri)
gemv = intrin_gemv(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor_no_reset(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
s[C].reorder(yo, ro, yi, ri)
gemv = intrin_gemv_no_reset(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
def check_rfactor_no_reset_multi_reduction(factor, rfactor):
s = te.create_schedule(C.op)
x, y = C.op.axis
rk = C.op.reduce_axis[0]
yo, yi = s[C].split(y, factor=factor)
ro, ri = s[C].split(rk, factor=rfactor)
roo, roi = s[C].split(ro, factor=2)
s[C].reorder(yo, roo, roi, yi, ri)
gemv = intrin_gemv_no_reset(factor, rfactor)
s[C].tensorize(yi, gemv)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
out_dom, in_dom = finfer(s[C], dom_map)
assert tvm.ir.structural_equal(out_dom[x].extent, 1)
assert tvm.ir.structural_equal(out_dom[y].extent, factor)
assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
body = fmatch(s[C], out_dom, in_dom, gemv)
ana = tvm.arith.Analyzer()
assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
tvm.lower(s, [A, B, C])
check(16)
check_rfactor(16, 16)
check_rfactor_no_reset(16, 16)
check_rfactor_no_reset_multi_reduction(16, 16)
# This tests whether algorithm and intrinsics expressions are simplified
# as much as possible first and then checked for equality. See Issue #696
def test_tensorize_op():
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
def op_intrin():
bh = 9
bw = 9
x = te.placeholder((5, 5), name="A")
y = te.compute((bh, bw), lambda i, j: x[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
def intrin_func(ins, outs):
(xx,) = ins
zz = outs[0]
return tvm.tir.call_packed("op", xx, zz)
return te.decl_tensor_intrin(y.op, intrin_func, default_buffer_params={"offset_factor": 2})
A = te.placeholder((5, 5), name="A")
B = te.compute((9, 9), lambda i, j: A[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
bt = op_intrin()
s = te.create_schedule(B.op)
x, y = B.op.axis
s[B].tensorize(x, bt)
s = s.normalize()
tvm.lower(s, [A, B])
# This test asserts that tensorize does not have any effect on
# TensorComputeOp operations
def test_tensorize_tensor_compute_op():
# an intrinsic called "multivadd" whose definition (pattern)
# is a loop of another intrinsic called "vadd"
def intrin_multivadd(n):
n_a = te.var("n_a")
Ab = tvm.tir.decl_buffer((n,), "float32", strides=[n_a])
n_b = te.var("n_b")
Bb = tvm.tir.decl_buffer((n,), "float32", strides=[n_b])
n_c = te.var("n_c")
Cb = tvm.tir.decl_buffer((n,), "float32", strides=[n_c])
z = te.compute(
(n,),
lambda i: tvm.tir.call_extern(
"float32",
"vadd",
Ab.access_ptr("w", offset=n_a * i),
Bb.access_ptr("r", offset=n_b * i),
Cb.access_ptr("r", offset=n_c * i),
),
)
        # Replace the pattern with the multivadd call.
        # TODO: work out how to pass it the right parameters.
def intrin_func(ins, outs):
return tvm.tir.call_packed("multivadd")
return te.decl_tensor_intrin(z.op, intrin_func, name="multivadd")
def intrin_vadd(n):
dtype = "float32"
x = te.placeholder((n,), dtype=dtype, name="vx")
y = te.placeholder((n,), dtype=dtype, name="vy")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
def create_buffer(t):
return tvm.tir.decl_buffer(t.shape, t.dtype, name="W" + t.name, offset_factor=16)
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
"float32",
"vadd",
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(
z.op, intrin_func, binds={x: create_buffer(x), y: create_buffer(y), z: create_buffer(z)}
)
# cache_read, cache_write
M = 1024
factor = 16
dtype = "float32"
A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
vadd = intrin_vadd(factor)
C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]), name="C")
s = te.create_schedule(C.op)
multivadd = intrin_multivadd(64)
s[C].tensorize(C.op.axis[0], multivadd)
s = s.normalize()
dom_map = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
    # The loop that we tried to tensorize still exists in the code,
    # i.e. tensorize had no effect on the TensorComputeOp, which is the expected behavior.
assert isinstance(stmt.body, tvm.tir.For)
assert stmt.body.loop_var.name == C.op.axis[0].var.name
if __name__ == "__main__":
test_tensorize_vadd()
test_tensorize_matmul()
test_tensorize_op()
test_tensorize_tensor_compute_op()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_tag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
from tvm import te
@tvm.te.tag_scope(tag="conv")
def compute_conv(data, weight):
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = te.reduce_axis((0, IC), name="ic")
dh = te.reduce_axis((0, KH), name="dh")
dw = te.reduce_axis((0, KW), name="dw")
return te.compute(
(N, OC, OH, OW),
lambda i, oc, h, w: te.sum(
data[i, ic, h + dh, w + dw] * weight[oc, ic, dh, dw], axis=[ic, dh, dw]
),
)
def test_with():
n = te.size_var("n")
m = te.size_var("m")
l = te.size_var("l")
A = te.placeholder((n, l), name="A")
B = te.placeholder((m, l), name="B")
with tvm.te.tag_scope(tag="gemm"):
k = te.reduce_axis((0, l), name="k")
C = te.compute(
(n, m),
lambda i, j: te.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello": 1, "arr": [10, 12]},
)
assert C.op.tag == "gemm"
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.ir.load_json(tvm.ir.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
# str format happened to be json compatible
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
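# A small sketch for test_with above (an illustrative assumption, not part of the
# original tests): tag_scope only labels the ops created inside the "with" block.
def _tag_scope_sketch():
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    with tvm.te.tag_scope(tag="elemwise"):
        D = te.compute((n,), lambda i: A[i] + 1, name="D")
    assert D.op.tag == "elemwise"
    return D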
def test_decorator():
n = te.size_var("n")
c = te.size_var("c")
h = te.size_var("h")
w = te.size_var("w")
kh = te.size_var("kh")
kw = te.size_var("kw")
A = te.placeholder((n, c, h, w), name="A")
B = te.placeholder((c, c, kh, kw), name="B")
C = compute_conv(A, B)
assert C.op.tag == "conv"
assert len(C.op.attrs) == 0
def test_nested():
n = te.size_var("n")
c = te.size_var("c")
h = te.size_var("h")
w = te.size_var("w")
kh = te.size_var("kh")
kw = te.size_var("kw")
A = te.placeholder((n, c, h, w), name="A")
B = te.placeholder((c, c, kh, kw), name="B")
try:
with te.tag_scope(tag="conv"):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.nn.pooling import pool2d
def test_tensor():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
T = te.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k])
print(T)
print(T.op.body)
assert tuple(T.shape) == (m, n, l)
assert isinstance(A.op, tvm.te.PlaceholderOp)
assert A == A
assert T.op.output(0) == T
assert T.op.output(0).__hash__() == T.__hash__()
d = {T.op.output(0): 1}
assert d[T] == 1
assert T[0][0][0].astype("float16").dtype == "float16"
def test_rank_zero():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
scale = te.placeholder((), name="s")
k = te.reduce_axis((0, m), name="k")
T = te.compute((), lambda: te.sum(A[k] * scale(), axis=k))
print(T)
print(T.op.body)
assert tuple(T.shape) == ()
def test_conv1d():
n = te.size_var("n")
A = te.placeholder((n + 2), name="A")
def computeB(ii):
i = ii + 1
return A[i - 1] + A[i] + A[i + 1]
B = te.compute(n, computeB)
def test_tensor_slice():
n = te.size_var("n")
A = te.compute((n, n), lambda i, j: 1)
B = te.compute((n,), lambda i: A[0][i] + A[0][i])
def test_tensor_reduce_multi_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
k1 = te.reduce_axis((0, n), "k")
k2 = te.reduce_axis((0, m), "k")
C = te.compute((1,), lambda _: te.sum(A[k1, k2], axis=(k1, k2)))
C = te.compute((1,), lambda _: te.sum(A[k1, k2], axis=[k1, k2]))
def test_tensor_comm_reducer():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
k = te.reduce_axis((0, n), "k")
mysum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
C = te.compute((m,), lambda i: mysum(A[i, k], axis=k))
def test_tensor_comm_reducer_overload():
m = te.size_var("m")
n = te.size_var("n")
mysum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
sum_res = mysum(m, n)
def test_tensor_reduce():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
T = te.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k])
rv = te.reduce_axis((0, A.shape[1]), "k")
C = te.compute((m, n), lambda i, j: te.sum(T(i, j, rv + 1), axis=rv))
# json load save
C_json = tvm.ir.save_json(C)
C_loaded = tvm.ir.load_json(C_json)
assert isinstance(C_loaded, te.tensor.Tensor)
assert str(C_loaded) == str(C)
def test_tensor_reduce_multiout_with_cond():
def fcombine(x, y):
return x[0] + y[0], x[1] + y[1]
def fidentity(t0, t1):
return tvm.tir.const(0, t0), tvm.tir.const(1, t1)
mysum = te.comm_reducer(fcombine, fidentity, name="mysum")
m = te.var("m")
n = te.var("n")
idx = te.placeholder((m, n), name="idx", dtype="int32")
val = te.placeholder((m, n), name="val", dtype="int32")
k = te.reduce_axis((0, n), "k")
cond = te.floormod(k, 2) == 0
T0, T1 = te.compute((m,), lambda i: mysum((idx[i, k], val[i, k]), axis=k, where=cond), name="T")
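# A NumPy reference sketch for test_tensor_reduce_multiout_with_cond above (an
# illustrative assumption, not part of the original test): with identities (0, 1),
# only even k contribute, so T0 sums idx and T1 is 1 plus the sum of val.
def _multiout_where_reference_sketch():
    idx_np = np.arange(12, dtype="int32").reshape(3, 4)
    val_np = np.ones((3, 4), dtype="int32")
    even = (np.arange(4) % 2) == 0
    t0_ref = idx_np[:, even].sum(axis=1)  # fidentity gives 0 for the idx slot
    t1_ref = 1 + val_np[:, even].sum(axis=1)  # fidentity gives 1 for the val slot
    return t0_ref, t1_ref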
def test_tensor_compute1():
m = 1024
factor = 16
dtype = "float32"
def intrin_vadd(n):
x = te.placeholder((n,))
y = te.placeholder((n,))
z = te.compute(x.shape, lambda i: x[i] + y[i])
def intrin_func(ins, outs):
ib = tvm.tir.ir_builder.create()
ib.emit(
tvm.tir.call_extern(
outs[0].dtype,
"vadd",
ins[0].access_ptr("r"),
ins[1].access_ptr("r"),
outs[0].access_ptr("wr"),
)
)
return ib.get()
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
vadd = intrin_vadd(factor)
A = te.placeholder((m // factor, factor), name="A", dtype=dtype)
B = te.placeholder((m // factor, factor), name="B", dtype=dtype)
C = te.compute((m // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]))
s = te.create_schedule(C.op)
# check lowering with the CSE pass disabled as otherwise it would do some commoning
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
stmt = tvm.lower(s, [A, B, C])["main"].body
assert isinstance(stmt.body, tvm.tir.Evaluate)
def test_tensor_compute2():
M = 2048
N = 1024
L = 1024
factor = 16
factor1 = 32
factor2 = 32
dtype = "float32"
def intrin_gemm(m, n, l):
k = te.reduce_axis((0, l))
x = te.placeholder((m, l))
y = te.placeholder((n, l))
        # in theory, the intrinsic's declared shapes need not relate to the outer computation
z = te.compute((m, n), lambda i, j: te.sum(x[i][k] * y[j][k], axis=k))
def intrin_func(ins, outs):
x_ptr = ins[0].access_ptr("r")
y_ptr = ins[1].access_ptr("r")
z_ptr = outs[0].access_ptr("w")
body = tvm.tir.call_packed("gemv", x_ptr, y_ptr, z_ptr, m, n, l)
reset = tvm.tir.call_packed("fill_zero", z_ptr, m, n)
update = tvm.tir.call_packed("gemv_add", x_ptr, y_ptr, z_ptr, m, n, l)
return body, reset, update
return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
vgemm = intrin_gemm(factor1, factor2, factor)
A = te.placeholder((M // factor1, L // factor, factor1, factor), name="A", dtype=dtype)
B = te.placeholder((N // factor2, L // factor, factor2, factor), name="B", dtype=dtype)
k = te.reduce_axis((0, L // factor), name="k")
C = te.compute(
(M // factor1, N // factor2, factor1, factor2),
lambda i, j: vgemm(
A[i, k, 0:factor1, 0:factor], B[j, k, 0:factor2, 0:factor], reduce_axis=k
),
)
s = te.create_schedule(C.op)
# check lowering with the CSE pass disabled as otherwise it would do some commoning
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
stmt = tvm.lower(s, [A, B, C])["main"].body
assert isinstance(stmt.body.body[0], tvm.tir.Evaluate)
assert isinstance(stmt.body.body[1].body, tvm.tir.Evaluate)
def test_tensor_scan():
m = te.size_var("m")
n = te.size_var("n")
x = te.placeholder((m, n))
s = te.placeholder((m, n))
res = tvm.te.scan(
te.compute((1, n), lambda _, i: x[0, i]),
te.compute((m, n), lambda t, i: s[t - 1, i] + x[t, i]),
s,
)
assert tuple(res.shape) == (m, n)
def test_scan_multi_out():
m = te.size_var("m")
n = te.size_var("n")
x1 = te.placeholder((m, n))
s1 = te.placeholder((m, n))
x2 = te.placeholder((m, n))
s2 = te.placeholder((m, n))
s1_init = te.compute((1, n), lambda _, i: x1[0, i])
s2_init = te.compute((1, n), lambda _, i: x2[0, i])
s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + s2[t - 1, i] + x1[t, i])
s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
assert r0.value_index == 0
assert r1.value_index == 1
json_str = tvm.ir.save_json(r0.op)
zz = tvm.ir.load_json(json_str)
assert isinstance(zz, tvm.te.ScanOp)
def test_extern():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
def extern_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, m)
B = te.extern((m,), [A], extern_func)
assert tuple(B.shape) == (m,)
def test_extern_multi_out():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] * 10)
def extern_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, outs[1].data, m)
res = te.extern([A.shape, A.shape], [A, B], extern_func)
assert len(res) == 2
assert res[1].value_index == 1
def test_tuple_inputs():
m = te.size_var("m")
n = te.size_var("n")
A0 = te.placeholder((m, n), name="A0")
A1 = te.placeholder((m, n), name="A1")
T0, T1 = te.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name="T")
s = te.create_schedule(T0.op)
for i in range(len(T0.shape)):
assert T0.shape[i] == T1.shape[i]
assert T0.op == T1.op
assert T0.value_index == 0
assert T1.value_index == 1
def test_tuple_with_different_deps():
m = te.size_var("m")
n = te.size_var("n")
A0 = te.placeholder((m, n), name="A1")
A1 = te.placeholder((m, n), name="A2")
B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name="B")
C = te.compute((m, n), lambda i, j: B0[i, j] + 4, name="C")
s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=10)
s[B0.op].compute_at(s[C], xo)
sch = s.normalize()
bounds = tvm.te.schedule.InferBound(sch)
stmt = tvm.te.schedule.ScheduleOps(sch, bounds)
def get_B1_realize(x):
if (
isinstance(x, tvm.tir.ProducerRealize)
and x.producer.op == B1.op
and x.producer.value_index == 1
):
ret.append(x)
ret = []
tvm.tir.stmt_functor.post_order_visit(stmt, get_B1_realize)
assert stmt.producer == C and len(ret) == 1
def test_tensor_inputs():
x = te.placeholder((1,), name="x")
y = te.compute(x.shape, lambda i: x[i] + x[i])
assert tuple(y.op.input_tensors) == (x,)
def test_tensor_pool():
def intrin_pool():
A = te.placeholder((64, 16, 16), name="A")
kh = te.reduce_axis((0, 3), name="kh")
kw = te.reduce_axis((0, 3), name="kw")
P = te.compute(
(64, 14, 14),
lambda c, oh, ow: tvm.te.max(A[c, oh + kh, ow + kw], axis=[kh, kw]),
name="p",
)
def intrin_func(ins, outs):
dinp = ins[0]
dout = outs[0]
return tvm.tir.call_packed("op", dinp, dout)
return te.decl_tensor_intrin(P.op, intrin_func, default_buffer_params={"offset_factor": 1})
A = te.placeholder((1, 64, 16, 16), name="A")
P = pool2d(
data=A, kernel=(3, 3), stride=(1, 1), dilation=(1, 1), padding=(0, 0, 0, 0), pool_type="max"
)
s = te.create_schedule(P.op)
_, oh, _, _ = P.op.axis
intrin = intrin_pool()
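    # Tensorizing at oh replaces the loop nest rooted at that axis with the
    # packed "op" call declared in intrin_func.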
s[P].tensorize(oh, intrin)
tvm.lower(s, [A, P])
def test_tensor_scalar_mixed():
# test te with tensor and scalar
a = np.array(np.random.uniform(size=(10,)), "float32")
b = np.array(np.random.uniform(size=(1))[0], "float32")
c = np.array(np.random.uniform(size=(10,)), "float32")
@tvm.register_func("tvm.test_tensor_scalar_scale")
def my_scale(tensor, scalar, out):
out_np = tensor.numpy() * scalar.numpy()
tvm.nd.array(out_np).copyto(out)
A = te.placeholder(a.shape, name="A")
B = te.placeholder(b.shape, name="B")
C = te.extern(
a.shape,
[A, B],
lambda ins, outs: tvm.tir.call_packed(
"tvm.test_tensor_scalar_scale", ins[0], ins[1], outs[0]
),
name="C",
)
s = te.create_schedule(C.op)
f = tvm.build(s, [A, B, C], "llvm")
ta = tvm.nd.array(a)
tb = tvm.nd.array(b)
tc = tvm.nd.array(c)
f(ta, tb, tc)
tvm.testing.assert_allclose(a * b, tc.numpy())
def test_tensor_scalar():
# test te with scalar shape
a = np.array(np.random.uniform(size=(1))[0], "float32")
b = np.array(0.0, "float32")
@tvm.register_func("tvm.test_tensor_scalar_copy")
def mycopy(x, y):
x.copyto(y)
A = te.placeholder(a.shape, name="A")
B = te.extern(
a.shape,
[A],
lambda ins, outs: tvm.tir.call_packed("tvm.test_tensor_scalar_copy", ins[0], outs[0]),
name="B",
)
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
ta = tvm.nd.array(a)
tb = tvm.nd.array(b)
f(ta, tb)
tvm.testing.assert_allclose(ta.numpy(), tb.numpy())
if __name__ == "__main__":
test_tensor()
test_rank_zero()
test_conv1d()
test_tensor_slice()
test_tensor_reduce_multi_axis()
test_tensor_comm_reducer()
test_tensor_comm_reducer_overload()
test_tensor_reduce()
test_tensor_reduce_multiout_with_cond()
test_tensor_compute1()
test_tensor_compute2()
test_tensor_scan()
test_scan_multi_out()
test_extern()
test_extern_multi_out()
test_tuple_inputs()
test_tuple_with_different_deps()
test_tensor_inputs()
test_tensor_pool()
test_tensor_scalar_mixed()
test_tensor_scalar()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_tensor_overload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
import tvm.testing
def test_operator_type_and_tags():
k = 1
n = te.var("n")
A = te.placeholder((), name="A")
B = te.placeholder((10, 5), name="B")
B1 = B[0]
B2 = B[0, 0]
assert isinstance(k + n, tvm.tir.PrimExpr)
assert isinstance(n + n, tvm.tir.PrimExpr)
assert isinstance(k + A, te.tensor.Tensor)
assert isinstance(A + k, te.tensor.Tensor)
assert isinstance(n + A, te.tensor.Tensor)
assert isinstance(A + n, te.tensor.Tensor)
assert isinstance(A + A, te.tensor.Tensor)
assert isinstance(k + B, te.tensor.Tensor)
assert isinstance(B + k, te.tensor.Tensor)
assert isinstance(n + B, te.tensor.Tensor)
assert isinstance(B + n, te.tensor.Tensor)
assert isinstance(A + B, te.tensor.Tensor)
assert isinstance(B + A, te.tensor.Tensor)
assert isinstance(B + B, te.tensor.Tensor)
assert (k + B).op.tag == topi.tag.ELEMWISE
assert (B + k).op.tag == topi.tag.ELEMWISE
assert (n + B).op.tag == topi.tag.ELEMWISE
assert (B + n).op.tag == topi.tag.ELEMWISE
assert (A + B).op.tag == topi.tag.BROADCAST
assert (B + A).op.tag == topi.tag.BROADCAST
assert (B + B).op.tag == topi.tag.BROADCAST
assert isinstance(k + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + k, tvm.tir.PrimExpr)
assert isinstance(n + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + n, tvm.tir.PrimExpr)
assert isinstance(B2 + B2, tvm.tir.PrimExpr)
assert isinstance(B2 + A, te.tensor.Tensor)
assert isinstance(A + B2, te.tensor.Tensor)
assert isinstance(B2 + B, te.tensor.Tensor)
assert isinstance(B + B2, te.tensor.Tensor)
def test_combination():
k = 3
n = 5
m = 10
x = te.var("x")
A = te.placeholder((n, m), name="A")
B = te.placeholder((n, m), name="B")
C = te.placeholder((n, m), name="C")
D = k + A - B * C + x
s = te.create_schedule(D.op)
foo = tvm.build(s, [x, A, B, C, D], "llvm")
dev = tvm.cpu(0)
x = 2
a = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(n, m)).astype(B.dtype), dev)
c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), dev)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
foo(x, a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), k + a.numpy() - b.numpy() * c.numpy() + x)
def verify_tensor_scalar_bop(shape, typ="add"):
"""Verify non-constant Tensor and scalar binary operations."""
sh = [te.size_var("n%d" % i) for i in range(0, len(shape))]
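    # Symbolic shape vars become trailing arguments of the built function
    # (see foo(a_nd, b_nd, k_, *shape) below).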
k = te.var("k")
A = te.placeholder(sh, name="A")
if typ == "add":
B = A + k
elif typ == "sub":
B = A - k
elif typ == "mul":
B = A * k
elif typ == "div":
B = A / k
else:
raise NotImplementedError()
def check_device(device):
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
dev = tvm.device(device, 0)
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_elemwise_schedule(device)(B)
k_ = 2
foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ)
a_npy = np.random.uniform(size=shape).astype(A.dtype)
if typ == "add":
b_npy = a_npy + k_
elif typ == "sub":
b_npy = a_npy - k_
elif typ == "mul":
b_npy = a_npy * k_
elif typ == "div":
b_npy = a_npy / k_
else:
raise NotImplementedError()
a_nd = tvm.nd.array(a_npy, dev)
b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev)
foo(a_nd, b_nd, k_, *shape)
tvm.testing.assert_allclose(b_nd.numpy(), b_npy, rtol=1e-5)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
def verify_broadcast_bop(lhs_shape, rhs_shape, typ="add"):
A = te.placeholder(shape=lhs_shape, name="A")
B = te.placeholder(shape=rhs_shape, name="B")
if typ == "add":
C = A + B
elif typ == "sub":
C = A - B
elif typ == "mul":
C = A * B
elif typ == "div":
C = A / B
else:
raise NotImplementedError()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
s = tvm.topi.testing.get_broadcast_schedule(device)(C)
foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ)
lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)
if typ == "add":
out_npy = lhs_npy + rhs_npy
elif typ == "sub":
out_npy = lhs_npy - rhs_npy
elif typ == "mul":
out_npy = lhs_npy * rhs_npy
elif typ == "div":
rhs_npy = np.abs(rhs_npy) + 0.001
out_npy = lhs_npy / rhs_npy
else:
raise NotImplementedError()
lhs_nd = tvm.nd.array(lhs_npy, dev)
rhs_nd = tvm.nd.array(rhs_npy, dev)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev)
for _ in range(1):
foo(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
@tvm.testing.uses_gpu
def verify_conv2d_scalar_bop(
batch, in_size, in_channel, num_filter, kernel, stride, padding, typ="add"
):
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
k = 10.0
dilation = (1, 1)
with tvm.target.Target(device):
A = te.placeholder((batch, in_channel, in_size, in_size), name="A")
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
B = conv2d_nchw(A, W, stride, padding, dilation, A.dtype)
if typ == "add":
C = B + k
elif typ == "sub":
C = B - k
elif typ == "mul":
C = B * k
elif typ == "div":
C = B / k
else:
raise NotImplementedError()
s = schedule_conv2d_nchw([C])
foo = tvm.build(s, [A, W, B, C], device, name="conv2d_scalar_" + typ)
a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)
if typ == "add":
c_npy = b_npy + k
elif typ == "sub":
c_npy = b_npy - k
elif typ == "mul":
c_npy = b_npy * k
elif typ == "div":
c_npy = b_npy / k
else:
raise NotImplementedError()
a_nd = tvm.nd.array(a_npy, dev)
w_nd = tvm.nd.array(w_npy, dev)
b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev)
c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), dev)
foo(a_nd, w_nd, b_nd, c_nd)
tvm.testing.assert_allclose(c_nd.numpy(), c_npy, rtol=1e-4, atol=1e-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]:
check_device(device)
@tvm.testing.uses_gpu
def test_tensor_scalar_bop():
verify_tensor_scalar_bop((1,), typ="add")
verify_tensor_scalar_bop((3, 5), typ="sub")
verify_tensor_scalar_bop((1, 3, 5), typ="mul")
verify_tensor_scalar_bop((2, 3, 1, 32), typ="div")
@tvm.testing.uses_gpu
def test_broadcast_bop():
verify_broadcast_bop((2, 3), (), typ="add")
verify_broadcast_bop((5, 2, 3), (1,), typ="add")
verify_broadcast_bop((1, 32), (64, 32), typ="sub")
verify_broadcast_bop((5, 64, 128), (2, 5, 64, 1), typ="mul")
verify_broadcast_bop((2, 3, 1, 32), (64, 32), typ="div")
@tvm.testing.uses_gpu
def test_conv2d_scalar_bop():
verify_conv2d_scalar_bop(1, 16, 4, 4, 3, 1, 1, typ="add")
verify_conv2d_scalar_bop(1, 32, 2, 1, 3, 1, 1, typ="sub")
verify_conv2d_scalar_bop(1, 32, 1, 1, 3, 1, 1, typ="mul")
verify_conv2d_scalar_bop(1, 16, 2, 1, 3, 1, 1, typ="div")
if __name__ == "__main__":
test_operator_type_and_tags()
test_combination()
test_tensor_scalar_bop()
test_broadcast_bop()
test_conv2d_scalar_bop()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_te_verify_compute.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_verify_compute():
n = te.size_var("n")
m = te.size_var("m")
A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
k_ = te.reduce_axis((0, m - 1), "k_")
f1 = lambda i: te.sum(A[i, k], axis=k)
f2 = lambda i: A[i, 0] + 1
f3 = lambda i: te.sum(A[i, k], axis=k) + 1
f4 = lambda i: A[i, 0] * (te.sum(A[i, k], axis=k) + 1)
f5 = lambda i: (te.sum(A[i, k], axis=k), A[i, 0] + 1)
f6 = lambda i: (te.sum(A[i, k], axis=k), te.sum(A[i, k_], axis=k_))
#
# Valid compute
try:
B = te.compute((n,), f1, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
#
# Valid compute
try:
B = te.compute((n,), f2, name="B")
except tvm._ffi.base.TVMError as ex:
assert False
#
# Invalid compute with non top level reduction
try:
B = te.compute((n,), f3, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with non top level reduction
try:
B = te.compute((n,), f4, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with reduction and non-reduction batch ops
try:
B0, B1 = te.compute((n,), f5, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
#
# Invalid compute with unequal batch reduction ops
try:
B0, B1 = te.compute((n,), f6, name="B")
assert False
except tvm._ffi.base.TVMError as ex:
pass
if __name__ == "__main__":
test_verify_compute()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_testing.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
def test_check_numerical_grads():
# Functions and their derivatives
functions = [
lambda x: (x * x * x, 3 * x * x),
lambda x: (x * x, 2 * x),
lambda x: (np.abs(x), np.sign(x)),
lambda x: (np.log(np.abs(x)), 1 / x),
lambda x: (np.sqrt(np.abs(x)), np.sign(x) / (2 * np.sqrt(np.abs(x)))),
lambda x: (1 / x, -1 / (x * x)),
lambda x: (np.sign(np.sin(1 / x)), np.zeros_like(x)),
lambda x: (x * np.sin(1 / x), np.sin(1 / x) - np.cos(1 / x) / x),
lambda x: (np.sin(1 / x), -np.cos(1 / x) / (x * x)),
lambda x: (np.tan(x), 1.0 / (np.cos(x) * np.cos(x))),
]
np.random.seed(0)
# Avoid values too close to 0 since singularities of our functions are there
min_x = 0.5
for func in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
# We need a function returning a scalar, so sum the results
func_forw = lambda x: np.sum(func(x)[0])
grads = [func(x_input)[1]]
tvm.testing.check_numerical_grads(func_forw, [x_input], grads)
# Check functions with multiple arguments
for f1 in functions:
for f2 in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
y_input = np.random.uniform(min_x, 10, size=(3, 4))
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = [f1(x_input)[1], f2(y_input)[1]]
tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
# Same thing but with keyword arguments
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = {"x": f1(x_input)[1], "y": f2(y_input)[1]}
tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
def _noise1(x, atol=1e-2, rtol=0.1):
# We go in random direction using twice the original tolerance to be sure this
# results in an error
sqrt_n = np.sqrt(float(np.prod(x.shape)))
tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
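        # atol is scaled by sqrt(n) to turn the per-element bound into a bound on
        # the vector norm; the factor of 2 guarantees the perturbation exceeds it.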
noise = np.random.normal(size=x.shape)
noise = tol * noise / np.linalg.norm(noise)
return x + noise
def _noise2(x, atol=1e-2, rtol=0.1):
# This noise affects just a single component
sqrt_n = np.sqrt(float(np.prod(x.shape)))
tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
n = np.random.randint(np.prod(x.shape))
noise = np.zeros_like(x)
noise.reshape(-1)[n] = tol
return x + noise
# Add noise to gradients and check that the function throws
for f1 in functions:
for f2 in functions:
x_input = np.random.uniform(min_x, 10, size=(3, 4))
y_input = np.random.uniform(min_x, 10, size=(3, 4))
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = [_noise1(f1(x_input)[1]), _noise1(f2(y_input)[1])]
try:
tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
except AssertionError as e:
pass
else:
raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
grads = {"x": _noise2(f1(x_input)[1]), "y": _noise2(f2(y_input)[1])}
try:
tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
except AssertionError as e:
pass
else:
raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
if __name__ == "__main__":
    test_check_numerical_grads()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_calculate_workspace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
# fmt: off
@T.prim_func
def primfunc_global_allocates(placeholder_144: T.handle, placeholder_145: T.handle, placeholder_146: T.handle, T_cast_48: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_13", "tir.noalias": True})
placeholder_147 = T.match_buffer(placeholder_144, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_148 = T.match_buffer(placeholder_145, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_149 = T.match_buffer(placeholder_146, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_49 = T.match_buffer(T_cast_48, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
# body
PaddedInput_22 = T.decl_buffer([131072], "int16")
DepthwiseConv2d_9 = T.decl_buffer([100352], "int32")
for i1_29, i2_39, i3_40 in T.grid(16, 16, 512):
PaddedInput_22[(((i1_29*8192) + (i2_39*512)) + i3_40)] = T.if_then_else(((((1 <= i1_29) and (i1_29 < 15)) and (1 <= i2_39)) and (i2_39 < 15)), placeholder_147[((((i1_29*7168) + (i2_39*512)) + i3_40) - 7680)], T.int16(0), dtype="int16")
for i_9, j_9, c_9 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = 0
for di_9, dj_9 in T.grid(3, 3):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = (DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] + (PaddedInput_22[(((((i_9*8192) + (di_9*8192)) + (j_9*512)) + (dj_9*512)) + c_9)].astype("int32")*placeholder_148[(((di_9*1536) + (dj_9*512)) + c_9)].astype("int32")))
for ax1_27, ax2_28, ax3_30 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] = (DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] + placeholder_149[ax3_30])
for i1_30, i2_40, i3_41 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)] = T.q_multiply_shift(DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)], 1269068532, 31, -4, dtype="int32")
for i1_31, i2_41, i3_42 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)] = T.max(T.max(DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)], 255), 0)
for ax1_28, ax2_29, ax3_31 in T.grid(14, 14, 512):
PaddedInput_22[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)] = DepthwiseConv2d_9[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)].astype("uint8")
for ax1_29, ax2_30, ax3_32 in T.grid(14, 14, 512):
T_cast_49[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)] = PaddedInput_22[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)].astype("int16")
# fmt: on
# fmt: off
@T.prim_func
def primfunc_local_allocates(placeholder_162: T.handle, placeholder_163: T.handle, placeholder_164: T.handle, T_cast_76: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_9", "tir.noalias": True})
placeholder_165 = T.match_buffer(placeholder_162, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_166 = T.match_buffer(placeholder_163, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_167 = T.match_buffer(placeholder_164, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_77 = T.match_buffer(T_cast_76, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
sid_21 = T.allocate_const([0,1,2,3,4,5,6,7], "int8", [8])
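    # 8 bytes of constant data: this is what calculate_constant_bytes reports
    # at alignment 1 in test_local_allocates below (rounded up for larger alignments).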
# body
PaddedInput_25 = T.decl_buffer([131072], "int16")
for i1_35, i2_46, i3_47 in T.grid(16, 16, 512):
PaddedInput_25[(((i1_35*8192) + (i2_46*512)) + i3_47)] = T.if_then_else(((((1 <= i1_35) and (i1_35 < 15)) and (1 <= i2_46)) and (i2_46 < 15)), placeholder_165[((((i1_35*7168) + (i2_46*512)) + i3_47) - 7680)], T.int16(0), dtype="int16")
T_add_11 = T.decl_buffer([100352], "int32")
with T.decl_buffer([100352], "int32") as DepthwiseConv2d_11:
for i_11, j_11, c_11 in T.grid(14, 14, 512):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = 0
for di_11, dj_11 in T.grid(3, 3):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = (DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] + (PaddedInput_25[(((((i_11*8192) + (di_11*8192)) + (j_11*512)) + (dj_11*512)) + c_11)].astype("int32")*placeholder_166[(((di_11*1536) + (dj_11*512)) + c_11)].astype("int32")))
for ax1_44, ax2_45, ax3_47 in T.grid(14, 14, 512):
T_add_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] = (DepthwiseConv2d_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] + placeholder_167[ax3_47])
compute_22 = T.decl_buffer([100352], "int32")
with T.decl_buffer([100352], "int32") as T_cast_78:
for ax1_45, ax2_46, ax3_48 in T.grid(14, 14, 512):
T_cast_78[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)] = T_add_11[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)]
for i1_36, i2_47, i3_48 in T.grid(14, 14, 512):
compute_22[(((i1_36*7168) + (i2_47*512)) + i3_48)] = T.q_multiply_shift(T_cast_78[(((i1_36*7168) + (i2_47*512)) + i3_48)], 1948805937, 31, -5, dtype="int32")
T_cast_79 = T.decl_buffer([100352], "uint8")
with T.decl_buffer([100352], "int32") as compute_23:
for i1_37, i2_48, i3_49 in T.grid(14, 14, 512):
compute_23[(((i1_37*7168) + (i2_48*512)) + i3_49)] = T.max(T.max(compute_22[(((i1_37*7168) + (i2_48*512)) + i3_49)], 255), 0)
for ax1_46, ax2_47, ax3_49 in T.grid(14, 14, 512):
T_cast_79[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)] = compute_23[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)].astype("uint8")
for ax1_47, ax2_48, ax3_50 in T.grid(14, 14, 512):
T_cast_77[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)] = T_cast_79[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)].astype("int16")
# fmt: on
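# At alignment 1 the expected workspace for primfunc_global_allocates is the sum
# of its two simultaneously live buffers: 131072 * 2 B (PaddedInput_22, int16)
# + 100352 * 4 B (DepthwiseConv2d_9, int32) = 262144 + 401408 = 663552 bytes.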
@pytest.mark.parametrize("alignment,size,consts", [(1, 663552, 0), (10, 663560, 0)])
def test_global_allocates(alignment, size, consts):
primfunc = primfunc_global_allocates
assert tvm.tir.analysis.calculate_constant_bytes(primfunc, alignment) == consts
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
@pytest.mark.parametrize("alignment,size,consts", [(1, 1566720, 8), (100, 1567100, 100)])
def test_local_allocates(alignment, size, consts):
primfunc = primfunc_local_allocates
assert tvm.tir.analysis.calculate_constant_bytes(primfunc, alignment) == consts
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
if __name__ == "__main__":
test_global_allocates()
test_local_allocates()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_detect_buffer_access_lca.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm.script import tir as T
@T.prim_func
def buffer_load_store_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.alloc_buffer((128, 128), "float32")
for ii, jj in T.grid(128, 128):
with T.block():
i, j = T.axis.remap("SS", [ii, jj])
A[i, j] = T.float32(0)
for i0, j0, k0 in T.grid(32, 32, 32):
with T.block():
i, j, k = T.axis.remap("SSR", [i0, j0, k0])
with T.init():
for ii, jj in T.grid(4, 4):
B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
for ii, jj in T.grid(4, 4):
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
for kk in range(0, 4):
B[i * 4 + ii, j * 4 + jj] += (
D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
)
@T.prim_func
def buffer_opaque_access(b: T.handle, c: T.handle) -> None:
B = T.match_buffer(b, [16, 16], "float32")
C = T.match_buffer(c, [16, 16], "float32")
with T.block():
T.reads([])
T.writes(B[0:16, 0:16])
A = T.decl_buffer([256], "float32")
for i, j in T.grid(16, 16):
A[i * 16 + j] = 1
for i in range(0, 16):
for j in range(0, 16):
T.evaluate(A[i * 16 + j])
for j in range(0, 16):
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, T.float32(0), dtype="handle"))
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj]
@T.prim_func
def lca_is_func_root(a: T.handle) -> None:
A = T.match_buffer(a, [0, 0], "float32")
A[0, 0] = 1.0
@T.prim_func
def match_buffer_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
for i, j in T.grid(8, 8):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
B1 = T.match_buffer(B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8))
for ii, jj in T.grid(16, 16):
with T.block("AAA"):
vii, vjj = T.axis.remap("SS", [ii, jj])
AA = T.match_buffer(A[vii, vjj], ())
AA[()] = 1.0
T.evaluate(B0.data)
T.evaluate(B1.data)
@T.prim_func
def global_buffer_with_blockidx(
a: T.Buffer[(1, 32), "int32"], b: T.Buffer[(1, 32), "int32"]
) -> None:
for i0 in T.thread_binding(0, 1, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("copy"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(a[i, j])
T.writes(b[i, j])
b[i, j] = a[i, j]
def test_buffer_load_store():
func = buffer_load_store_func
A, B = [func.buffer_map[x] for x in func.params]
C, D = func.body.block.alloc_buffers
lca = tir.analysis.detect_buffer_access_lca(func)
# LCA of Buffer A is root
root_block = func.body.block
assert lca[A] == func.body.block
    # LCA of Buffer B is the reduction block
    reduce_block = root_block.body[1].body.body.body.block
    assert lca[B] == reduce_block
    # LCA of Buffer C is loop jj
    loop_jj = reduce_block.body.body
    assert lca[C] == loop_jj
    # LCA of Buffer D is the second loop kk
    loop_kk = loop_jj.body[1]
    assert lca[D] == loop_kk
def test_opaque_access():
func = buffer_opaque_access
B, C = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
    # Cannot detect buffer A since it is defined by a low-level allocation
# LCA of Buffer B is root
root_block = func.body.block
assert lca[B] == func.body.block
    # LCA of Buffer C is the corresponding block
assert lca[C] == root_block.body[1].body.body.block
def test_lca_func_root():
func = lca_is_func_root
(A,) = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
assert lca[A] is None
def test_match_buffer():
func = match_buffer_func
A, B = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
block = root_block.body.body.body.block
block_inner = block.body[0].body.body.block
    # LCA of Buffer A is the inner block
    assert lca[A] == block_inner
    # LCA of Buffer B is the main block
    assert lca[B] == block
def test_global_buffer_with_blockidx():
func = global_buffer_with_blockidx
A, B = [func.buffer_map[x] for x in func.params]
lca = tir.analysis.detect_buffer_access_lca(func)
root_block = func.body.block
blockidx_loop = root_block.body
# LCA of both A and B should be the loop bound to `blockIdx`
assert lca[A] == blockidx_loop
assert lca[B] == blockidx_loop
if __name__ == "__main__":
test_buffer_load_store()
test_opaque_access()
test_lca_func_root()
test_match_buffer()
test_global_buffer_with_blockidx()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_estimate_tir_flops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm.testing
from tvm.ir import IRModule
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.tir.analysis import estimate_tir_flops
@pytest.mark.parametrize(
"workload, flops",
[
("C1D", 6291456),
("C2D", 236027904),
("C3D", 13217562624),
("CAP", 75497472),
("DEP", 7225344),
("DIL", 223552896),
("GMM", 4194304),
("GRP", 28901376),
("T2D", 268435456),
("CBR", 239239168),
("TBG", 25165824),
("NRM", 131072),
("SFM", 262144),
],
)
def test_te_workload(workload, flops):
te_workload = create_te_workload(workload, 0)
mod = IRModule({"main": te_workload})
assert float(flops) == estimate_tir_flops(mod)
@T.prim_func
def flops_with_let(a: T.Buffer[16, "float32"]):
for i in range(8):
j = i + 8
a[j] = a[i]
def test_flops_with_let():
flops = estimate_tir_flops(IRModule({"main": flops_with_let}))
assert flops == 8
@T.prim_func
def flops_with_if(a: T.Buffer[16, "float32"], b: T.Buffer[16, "float32"]):
for i in range(16):
if i % 2 == 0:
a[i] = b[i]
else:
if i % 3 == 0:
a[i] = b[i - 1] + b[i - 2]
def test_flops_with_if():
flops = estimate_tir_flops(IRModule({"main": flops_with_if}))
assert flops == 16
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_expr_deep_equal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_equal_expr():
x = te.var("x")
y = te.var("y")
def func1():
return x + y + 1
def func2():
return te.exp(tvm.tir.truncdiv((x + y + 1) * y, 4))
assert tvm.tir.analysis.expr_deep_equal(func1(), func1())
assert tvm.tir.analysis.expr_deep_equal(func2(), func2())
assert not tvm.tir.analysis.expr_deep_equal(func2(), func1())
if __name__ == "__main__":
test_equal_expr()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_get_block_access_region.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
from tvm.ir import Range
@T.prim_func
def func() -> None:
A = T.alloc_buffer((128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.alloc_buffer((128, 128), "float32")
with T.block():
        # Need to add read/write regions manually to avoid triggering the block access region detector
T.reads([B[0, 0], C[0:16, 0:16], A[4:12, 4:12]])
T.writes([A[0:12, 0:12]])
for i, j in T.grid(8, 8):
A[i, j] = B[0, 0] + C[0, 0]
for i, j in T.grid(2, 2):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8], C[12:16, 12:16]])
T.writes([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8]])
for i, j in T.grid(4, 4):
A[vi * 4 + 4 + i, vj * 4 + 4 + j] += C[i + 12, j + 12]
T.evaluate(D.data)
@T.prim_func
def match_buffer_func() -> None:
with T.block("root"):
A = T.alloc_buffer((128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
T.reads([])
T.writes([])
        # Need to add read/write regions manually to avoid triggering the block access region detector
for i, j in T.grid(8, 8):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
AA = T.match_buffer(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16], (16, 16))
B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
B1 = T.match_buffer(
B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8)
)
for ii, jj in T.grid(16, 16):
with T.block("AAA"):
vii, vjj = T.axis.remap("SS", [ii, jj])
T.reads([])
T.writes(AA[vii, vjj])
AAA = T.match_buffer(AA[vii, vjj], ())
AAA[()] = 1.0
T.evaluate(B0.data)
T.evaluate(B1.data)
@T.prim_func
def opaque_block_func() -> None:
with T.block("root"):
A = T.alloc_buffer((16, 16), "float32")
B = T.alloc_buffer((16, 16), "float32")
T.reads([])
T.writes([])
        # Need to add read/write regions manually to avoid triggering the block access region detector
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes([B[i, 0:16]])
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[i, j])
B[i, j] = A[i, j] + 1.0
@T.prim_func
def opaque_access_func() -> None:
A = T.alloc_buffer([1024])
B = T.alloc_buffer([1024])
for i in T.serial(0, 8):
with T.block():
v = T.axis.S(8, i)
T.reads([A[v * 128 : v * 128 + 128]])
T.writes([B[v * 128 : v * 128 + 128]])
T.evaluate(
T.call_extern("test", B.data, v * 128, 128, A.data, v * 128, 128, dtype="float32")
)
@T.prim_func
def opaque_access_with_tvm_access_ptr_func() -> None:
A = T.alloc_buffer([1024])
B = T.alloc_buffer([1024])
C = T.alloc_buffer([1024])
with T.block("opaque"):
T.reads(A[0:1024], C[0:1024])
T.writes(B[0:1024], C[0:1024])
T.evaluate(A.access_ptr("r"))
T.evaluate(B.access_ptr("w"))
T.evaluate(C.access_ptr("rw"))
@T.prim_func
def access_in_if_then_else_func() -> None:
A = T.alloc_buffer([8])
B = T.alloc_buffer([8])
with T.block():
T.reads([A[0:5]])
T.writes([B[0:8]])
for i in T.serial(0, 8):
B[i] = T.if_then_else(i < 5, A[i], 0.0, dtype="float32")
@T.prim_func
def access_in_branch_func() -> None:
A = T.alloc_buffer([8])
B = T.alloc_buffer([8])
with T.block():
T.reads([A[0:7]])
T.writes([B[0:8]])
for i in T.serial(0, 8):
if i < 5:
B[i] = A[i] + 1.0
else:
B[i] = A[i - 1]
@T.prim_func
def gemm() -> None:
A = T.alloc_buffer([16, 16], "float32")
B = T.alloc_buffer([16, 16], "float32")
C = T.alloc_buffer([16, 16], "float32")
for i, j, k, ii, jj in T.grid(4, 4, 16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
T.reads(A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
with T.init():
C[vi, vj] = 0
C[vi, vj] += A[vi, vk] * B[vj, vk]
@T.prim_func
def decomposed_gemm() -> None:
A = T.alloc_buffer([16, 16], "float32")
B = T.alloc_buffer([16, 16], "float32")
C = T.alloc_buffer([16, 16], "float32")
for i, j in T.grid(4, 4):
for ii, jj in T.grid(4, 4):
with T.block("init"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
T.reads([])
T.writes(C[vi, vj])
C[vi, vj] = 0
for k, ii, jj in T.grid(16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
T.writes(C[vi, vj])
C[vi, vj] += A[vi, vk] * B[vj, vk]
@T.prim_func
def access_of_padding_pattern() -> None:
X = T.alloc_buffer([28, 28])
X_pad = T.alloc_buffer([32, 32])
Y = T.alloc_buffer([28, 28])
for i, j in T.grid(32, 32):
with T.block("padding"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([X[vi - 2, vj - 2]])
T.writes([X_pad[vi, vj]])
X_pad[vi, vj] = T.if_then_else(
2 <= vi and vi < 30 and 2 <= vj and vj < 30, X[vi - 2, vj - 2], 0.0, dtype="float32"
)
with T.block("padding_reverse"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([X_pad[vi, vj]])
T.writes([Y[vi - 2, vj - 2]])
if 2 <= vi and vi < 30 and 2 <= vj and vj < 30:
Y[vi - 2, vj - 2] = X_pad[vi, vj]
def test_block_access_region_detector():
block = func.body.block.body.block
alloc_buffers = func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
D = alloc_buffers[-1]
tvm.ir.assert_structural_equal(
[tvm.tir.BufferRegion(D, [Range(0, 128), Range(0, 128)])], ret[2]
)
def test_opaque_block():
alloc_buffers = opaque_block_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
block0 = opaque_block_func.body.block.body.body.block
ret = tir.analysis.get_block_access_region(block0, buffer_var_map)
tvm.ir.assert_structural_equal(block0.reads, ret[0])
tvm.ir.assert_structural_equal(block0.writes, ret[1])
block1 = block0.body.body.block
ret = tir.analysis.get_block_access_region(block1, buffer_var_map)
tvm.ir.assert_structural_equal(block1.reads, ret[0])
tvm.ir.assert_structural_equal(block1.writes, ret[1])
def test_opaque_access():
block = opaque_access_func.body.block.body.body.block
alloc_buffers = opaque_access_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_opaque_access_with_tvm_access_ptr():
block = opaque_access_with_tvm_access_ptr_func.body.block.body.block
alloc_buffers = opaque_access_with_tvm_access_ptr_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret0[0])
tvm.ir.assert_structural_equal(block.writes, ret0[1])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_match_buffer():
root_block = match_buffer_func.body.block
block = root_block.body.body.body.block
block_inner = block.body[0].body.body.block
alloc_buffers = match_buffer_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
# Check block
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.writes, ret[1])
# B is opaque access
tvm.ir.assert_structural_equal(block.reads, ret[2])
# Check inner block AAA without updating buffer_var_map
ret = tir.analysis.get_block_access_region(block_inner, buffer_var_map)
# Since AA is not in the buffer_var_map, region of AA will not be collected.
tvm.ir.assert_structural_equal([], ret[1])
# Check inner block AAA
for match_buffer in block.match_buffers:
target_buffer = match_buffer.buffer
buffer_var_map[target_buffer.data] = target_buffer
ret = tir.analysis.get_block_access_region(block_inner, buffer_var_map)
tvm.ir.assert_structural_equal(block_inner.reads, ret[0])
tvm.ir.assert_structural_equal(block_inner.writes, ret[1])
def test_access_in_if_then_else_func():
block = access_in_if_then_else_func.body.block.body.block
alloc_buffers = access_in_if_then_else_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_access_in_branch_func():
block = access_in_branch_func.body.block.body.block
alloc_buffers = access_in_branch_func.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret0 = tir.analysis.get_block_read_write_region(block, buffer_var_map)
ret1 = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(ret0[0], ret1[0])
tvm.ir.assert_structural_equal(ret0[1], ret1[1])
def test_access_of_padding_pattern():
s = tvm.tir.schedule.Schedule(access_of_padding_pattern)
alloc_buffers = s.get_sref(s.get_block("root")).stmt.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
def do_compare_buffer_region(region, expect):
assert region.buffer == expect.buffer
analyzer = tvm.arith.Analyzer()
for observed_range, expected_range in zip(region.region, expect.region):
            assert analyzer.can_prove_equal(observed_range.min, expected_range.min)
            assert analyzer.can_prove_equal(observed_range.extent, expected_range.extent)
def do_check_block(block_name):
block = s.get_sref(s.get_block(block_name)).stmt
expect_reads = block.reads
expect_writes = block.writes
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
for i, read in enumerate(ret[0]):
do_compare_buffer_region(read, expect_reads[i])
for i, write in enumerate(ret[1]):
do_compare_buffer_region(write, expect_writes[i])
do_check_block("padding")
do_check_block("padding_reverse")
def test_access_of_reduction():
block = gemm.body.block.body.body.body.body.body.body.block
alloc_buffers = gemm.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
def test_access_of_decompose_reduction():
init = decomposed_gemm.body.block.body.body.body[0].body.body.block
update = decomposed_gemm.body.block.body.body.body[1].body.body.body.block
alloc_buffers = decomposed_gemm.body.block.alloc_buffers
buffer_var_map = {buf.data: buf for buf in alloc_buffers}
for block in [init, update]:
ret = tir.analysis.get_block_access_region(block, buffer_var_map)
tvm.ir.assert_structural_equal(block.reads, ret[0])
tvm.ir.assert_structural_equal(block.writes, ret[1])
if __name__ == "__main__":
test_block_access_region_detector()
test_opaque_block()
test_opaque_access()
test_opaque_access_with_tvm_access_ptr()
test_match_buffer()
test_access_in_if_then_else_func()
test_access_in_branch_func()
test_access_of_padding_pattern()
test_access_of_reduction()
test_access_of_decompose_reduction()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_oob.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
@T.prim_func
def bad_load(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
B[0, 0] = A[2, 2]
@T.prim_func
def bad_load_loop(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
for i in range(3):
B[i, 0] = A[i, 2]
@T.prim_func
def bad_store(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
B[0, 3] = A[1, 2]
@T.prim_func
def bad_store_loop(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
for i in range(3):
B[0, i] = A[1, i]
@T.prim_func
def unknown_bounds(A: T.Buffer[(2, 3), "float32"], B: T.Buffer[(3, 2), "float32"]):
N = T.var("int32")
for i in range(3):
B[0, N] = A[1, i]
def test_oob_load():
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_load))
assert "buffer A" in err.value.args[0]
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_load_loop))
assert "buffer A" in err.value.args[0]
def test_oob_store():
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_store))
assert "buffer B" in err.value.args[0]
with pytest.raises(tvm.tir.ScheduleError) as err:
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_store_loop))
assert "buffer B" in err.value.args[0]
def test_unknown_bounds():
    # This should not return an error, as we cannot prove that N goes out of bounds
tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(unknown_bounds))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_stmt_finding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te, topi
from tvm.meta_schedule.testing.te_workload import conv2d_winograd_nhwc, matmul
from tvm.tir.analysis import find_anchor_block
def test_matmul_add():
n = m = k = 128
A, B, C = matmul(n, m, k)
mod = tvm.IRModule()
mod["main"] = te.create_prim_func([A, B, C + A])
block = find_anchor_block(mod)
assert block.name_hint == "C"
def test_winograd():
mod = tvm.IRModule()
mod["main"] = te.create_prim_func(conv2d_winograd_nhwc(1, 14, 14, 128, 128, 6))
block = find_anchor_block(mod)
assert block.name_hint == "bgemm"
def test_no_anchor_block():
inp = te.placeholder((10,), name="input")
out = topi.nn.relu(inp + 1.0)
mod = tvm.IRModule()
mod["main"] = te.create_prim_func([inp, out])
assert find_anchor_block(mod) is None
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_usedef.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
@pytest.mark.xfail
def test_loop_dependent_allocate():
N = te.size_var("N")
A = te.placeholder((2 * N,), "float32", "A")
C = te.compute((N,), lambda i: A[2 * i] + A[i + 1], name="C")
s = te.create_schedule(C.op)
AA = s.cache_read(A, "local", [C])
s[AA].compute_at(s[C], s[C].op.axis[0])
    # this line should fail because IRUseDefAnalysis sees an allocate statement
    # referencing an undefined variable
tvm.lower(s, [A, C])
if __name__ == "__main__":
test_loop_dependent_allocate()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_verify_gpu_code.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test gpu code verifier"""
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def get_verify_pass(valid, **kwargs):
def _fverify(f, *_):
valid[0] = tvm.tir.analysis.verify_gpu_code(f, kwargs)
return f
return tvm.tir.transform.prim_func_pass(_fverify, opt_level=0)
@tvm.testing.requires_gpu
def test_shared_memory():
def check_shared_memory(storage_scope, dtype):
N = 1024
M = 128
tvm_type = tvm.runtime.DataType(dtype)
type_size = tvm_type.bits // 8 * tvm_type.lanes
A = te.placeholder((N,), name="A", dtype=dtype)
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
AA = s.cache_read(A, storage_scope, [B])
o, i = s[B].split(s[B].op.axis[0], M)
s[AA].compute_at(s[B], o)
s[B].bind(o, te.thread_axis("blockIdx.x"))
s[B].bind(i, te.thread_axis("threadIdx.x"))
# shared memory usage: M * sizeof(dtype) Bytes
# thread usage: M
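        # e.g. float32: type_size = 4, so AA needs 4 * 128 = 512 B of shared memory;
        # the verifier is probed one byte below this limit (fail) and at it (pass).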
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=type_size * M - 1,
max_threads_per_block=M,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=type_size * M,
max_threads_per_block=M,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
check_shared_memory("shared", "float32")
check_shared_memory("shared", "int8x4")
check_shared_memory("shared.dyn", "float32")
@tvm.testing.requires_gpu
def test_local_memory():
N = 1024
M = 128
A = te.placeholder((N,), name="A", dtype="float32")
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
AA = s.cache_read(A, "local", [B])
o, i = s[B].split(s[B].op.axis[0], M)
s[AA].compute_at(s[B], o)
s[B].bind(o, te.thread_axis("blockIdx.x"))
# local memory usage: M * 4B
# thread usage: M
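    # i.e. 4 * 128 = 512 B of local memory; checked one byte under and exactly at the bound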
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_local_memory_per_block=4 * M - 1, max_threads_per_block=1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_local_memory_per_block=4 * M, max_threads_per_block=1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_num_thread():
N = 1024
M = 128
A = te.placeholder((N,), name="A", dtype="float32")
B = te.compute((N,), lambda i: A[i], name="B")
s = te.create_schedule([B.op])
o, i = s[B].split(s[B].op.axis[0], M)
s[B].bind(o, te.thread_axis("threadIdx.x"))
s[B].bind(i, te.thread_axis("threadIdx.y"))
# shared memory usage: 0
# thread usage: N
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=0,
max_threads_per_block=N,
max_thread_y=M - 1,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid,
max_shared_memory_per_block=0,
max_threads_per_block=N,
max_thread_y=M,
),
)
]
}
):
tvm.build(s, [A, B], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_multiple_kernels():
N = 1024
A = te.placeholder((N, N), name="A")
B = te.compute((N, N), lambda i, j: A[i, j])
C = te.compute((N, N), lambda i, j: B[i, j])
s = te.create_schedule([C.op])
s[C].bind(s[C].op.axis[1], te.thread_axis("threadIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x"))
# shared memory usage: 0
# thread usage: N
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1
),
)
]
}
):
tvm.build(s, [A, C], target)
assert not valid[0]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [
(
2,
get_verify_pass(
valid, max_shared_memory_per_block=0, max_threads_per_block=N
),
)
]
}
):
tvm.build(s, [A, C], target)
assert valid[0]
@tvm.testing.requires_gpu
def test_wrong_bind():
N = 1024
A = te.placeholder((N, N - 1), name="A")
B = te.compute((N, N - 1), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
# bind a thread axis to two loop axes with different lengths
s[B].bind(s[B].op.axis[0], te.thread_axis("threadIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={
"tir.add_lower_pass": [(2, get_verify_pass(valid, max_threads_per_block=N * N))]
}
):
tvm.build(s, [A, B], target)
assert not valid[0]
@tvm.testing.requires_gpu
def test_vectorize():
N = 1024
A = te.placeholder((N, N), name="A")
B = te.compute((N, N), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=64)
s[B].bind(jo, te.thread_axis("threadIdx.x"))
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert not valid[0]
@tvm.testing.requires_gpu
def test_vectorize_half():
N = 1024
A = te.placeholder((N, N), name="A", dtype="float16")
B = te.compute((N, N), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=8)
s[B].bind(jo, te.thread_axis("threadIdx.x"))
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert valid[0]
@tvm.testing.requires_gpu
def test_vectorize_strided():
N = 1024
A = te.placeholder((N, N), name="A", dtype="float16")
B = te.compute((N, N), lambda i, j: A[j, i])
s = te.create_schedule([B.op])
i, j = s[B].op.axis
s[B].bind(i, te.thread_axis("blockIdx.x"))
jo, ji = s[B].split(j, factor=8)
s[B].vectorize(ji)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]}
):
tvm.lower(s, [A, B])
assert not valid[0]
@tvm.testing.requires_gpu
def test_vthread():
N = 1024
A = te.placeholder((N, 16), name="A")
B = te.compute((N, 16), lambda i, j: A[i, j])
s = te.create_schedule([B.op])
s[B].bind(s[B].op.axis[0], te.thread_axis("blockIdx.x"))
s[B].bind(s[B].op.axis[1], te.thread_axis("vthread"))
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
valid = [None]
for phase in [1, 2]:
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=16))]}
):
tvm.build(s, [A, B], target)
assert valid[0]
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=15))]}
):
tvm.build(s, [A, B], target)
assert not valid[0]
@tvm.testing.requires_gpu
def test_redundant_kernels():
dtype = "float32"
A = te.placeholder(shape=(1,), name="A", dtype=dtype)
B = te.placeholder(shape=(1,), name="B", dtype=dtype)
C = te.placeholder(shape=(1,), name="C", dtype=dtype)
D = topi.less(A, C)
E = topi.less(B, C)
F = topi.logical_or(D, E)
G = topi.identity(F)
for target in ["opencl", "cuda"]:
if not tvm.testing.device_enabled(target):
continue
print("Running on target: %s" % target)
valid = [None]
with tvm.target.Target(target):
s = tvm.topi.testing.get_reduce_schedule(target)(G)
with tvm.transform.PassContext(
config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_kernels=1))]}
):
tvm.build(s, [A, B, C, G], target)
assert valid[0]
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_verify_memory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import pytest
from tvm import te
import tvm.testing
# The device targets below correspond to DLDeviceType/TVMDeviceExtType values
# originally defined in dlpack.h and c_runtime_api.h.
gpu_devices = ["cuda", "opencl", "metal", "vulkan"]
other_devices = ["llvm", "ext_dev"]
# All computations are bound.
# So VerifyMemory pass is expected to succeed.
#
@tvm.testing.uses_gpu
def test_verify_memory_all_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
# B is bound to threads.
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
mod = tvm.lower(s, [A, B])
for dev_type in gpu_devices + other_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
tvm.tir.transform.VerifyMemory()(binded_mod)
# Computations are not bound.
# So VerifyMemory pass fails when device type is GPU.
#
@tvm.testing.uses_gpu
def test_verify_memory_not_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
# B is not bound to threads.
s = te.create_schedule(B.op)
mod = tvm.lower(s, [A, B])
for dev_type in gpu_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
with pytest.raises(RuntimeError):
tvm.tir.transform.VerifyMemory()(binded_mod)
for dev_type in other_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
tvm.tir.transform.VerifyMemory()(binded_mod)
# Computations are partially bound.
# So VerifyMemory pass fails when device type is GPU.
#
@tvm.testing.uses_gpu
def test_verify_memory_partially_bind():
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
C = te.compute(B.shape, lambda i: B[i] + 2.0, name="C")
D = te.compute(C.shape, lambda i: C[i] + 2.0, name="D")
# C is bound to threads, but B and D are not.
s = te.create_schedule([B.op, C.op, D.op])
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
mod = tvm.lower(s, [A, B, C, D])
for dev_type in gpu_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
with pytest.raises(RuntimeError):
tvm.tir.transform.VerifyMemory()(binded_mod)
for dev_type in other_devices:
if tvm.testing.device_enabled(dev_type):
binded_mod = tvm.tir.transform.Apply(
lambda f: f.with_attr("target", tvm.target.Target(dev_type))
)(mod)
tvm.tir.transform.VerifyMemory()(binded_mod)
if __name__ == "__main__":
test_verify_memory_all_bind()
test_verify_memory_not_bind()
test_verify_memory_partially_bind()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_verify_ssa.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_verify_ssa():
x = te.var("x")
y = te.var()
z = tvm.tir.Evaluate(x + y)
assert tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([x, y], z))
assert not tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([x, y], tvm.tir.LetStmt(x, 1, z)))
def test_verify_weak_let_ssa():
x = te.var("x")
z1 = tvm.tir.Let(x, 1, x + 1)
z2 = tvm.tir.Let(x, 2, x + 2)
assert tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([], tvm.tir.Evaluate(z1 + z1)))
assert not tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([], tvm.tir.Evaluate(z1 * z2)))
if __name__ == "__main__":
test_verify_ssa()
test_verify_weak_let_ssa()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_analysis_verify_well_formed.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
def test_pass_simple():
@T.prim_func
def element_wise(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
):
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
# It's an opaque block, so it can use variables defined outside
C[i, j] = B[i, j] * 2.0
assert tvm.tir.analysis.verify_well_formed(element_wise)
def test_fail_use_out_loop_var():
@T.prim_func
def element_wise(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
):
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
# we cannot use `i` since it's defined outside the block
B[vi, vj] = A[i, vj] * 2.0
assert not tvm.tir.analysis.verify_well_formed(element_wise, assert_mode=False)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import pytest
from tvm import tir
from tvm._ffi.base import TVMError
from tvm.ir.transform import PassContext
import itertools
import pytest
def build_tir_func(func):
func = func.with_attr("global_symbol", "main")
pass_ctx = PassContext.current()
if pass_ctx.config.get("tir.noalias", True):
func = func.with_attr("tir.noalias", True)
mod = tvm.IRModule({"main": func})
func = tvm.build(mod)
return func
def test_scalar_add():
# All these types should be interchangeable with each other
# E.g. float16 + float32 upconverts the float16 --> float32
# Meanwhile, if an int and a float are used together, the int
# is cast to the float type.
lhs_types = ["float32", "float16", "int32", "int64"]
rhs_types = ["float32", "float16"]
for lhs_type, rhs_type in itertools.product(lhs_types, rhs_types):
# Input vars should be float32, we will cast to test for upcasting between them
lhs_input = tir.Var("lhs", "float32")
rhs_input = tir.Var("rhs", "float32")
lhs = tir.Cast(lhs_type, lhs_input)
rhs = tir.Cast(rhs_type, rhs_input)
output = lhs + rhs
output = tir.ret(output)
output = tir.Evaluate(output)
func = tir.PrimFunc([lhs_input, rhs_input], output)
func = build_tir_func(func)
out = func(1.0, 2.0)
assert out == 3.0
def assignment_helper(store_dtype, value_dtype):
store = tir.Var("store", dtype=store_dtype)
value = tir.Var("value", dtype=value_dtype)
tir.Let(store, value, body=store)
def test_fail_implicit_downcasts_same_type():
# These lists should be sorted
bits = [8, 16, 32, 64]
for type in ["float", "int", "uint"]:
for i in range(len(bits) - 1):
with pytest.raises(TVMError):
assignment_helper(
store_dtype=f"{type}{bits[i]}", value_dtype=f"{type}{bits[i + 1]}"
)
def test_cast_between_types():
# We should only be able to assign values with the same types
bits = [16, 32]
types = ["float", "int", "uint"]
for store_type, store_bits, value_type, value_bits in itertools.product(
types, bits, types, bits
):
store_dtype = f"{store_type}{store_bits}"
value_dtype = f"{value_type}{value_bits}"
if store_dtype == value_dtype:
assignment_helper(store_dtype, value_dtype)
else:
# TODO: we might want to allow casts between uint and int types
with pytest.raises(TVMError):
assignment_helper(store_dtype, value_dtype)
def test_ret_const():
a = tir.const(0)
b = tir.ret(a)
b = tir.Evaluate(b)
func = tir.PrimFunc([], b)
func = build_tir_func(func)
out = func()
assert out == 0
def test_control_flow_jump():
ib = tvm.tir.ir_builder.create()
a = tir.Var("a", "float32")
b = tir.Var("b", "float32")
with ib.if_scope(True):
ib.emit(tir.Evaluate(tir.ret(a)))
ib.emit(tir.Evaluate(tir.ret(b)))
stmt = ib.get()
func = tir.PrimFunc([a, b], stmt)
func = build_tir_func(func)
out = func(1.0, 2.0)
assert out == 1.0
def test_exception():
with pytest.raises(tvm.TVMError):
x = tir.Var(name=1, dtype="int")
def test_eq_ops():
a = tir.IntImm("int8", 1)
with pytest.raises(ValueError):
assert a != None
with pytest.raises(ValueError):
assert not a == None
b = tir.StringImm("abc")
assert b != None
assert not b == None
if __name__ == "__main__":
test_scalar_add()
test_ret_const()
test_control_flow_jump()
test_exception()
test_eq_ops()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.tir import Buffer
import numpy as np
def test_buffer():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
Ab = tvm.tir.decl_buffer((m, n), "float32")
Bb = tvm.tir.decl_buffer((n, l), "float32")
assert isinstance(Ab, tvm.tir.Buffer)
assert Ab.dtype == "float32"
assert tuple(Ab.shape) == (m, n)
def test_buffer_access_ptr():
m = te.size_var("m")
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((m, n), "float32", strides=[n + 1, 1])
aptr = Ab.access_ptr("rw")
assert tvm.ir.structural_equal(aptr.args[3], Ab.strides[0] * m)
assert aptr.args[0].dtype == Ab.dtype
assert aptr.args[4].value == Buffer.READ | Buffer.WRITE
aptr = Ab.access_ptr("w")
assert aptr.args[4].value == Buffer.WRITE
def test_buffer_access_ptr_offset():
m = te.size_var("m")
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((m, n), "float32")
aptr = Ab.access_ptr("rw", offset=100)
tvm.testing.assert_prim_expr_equal(aptr.args[2], 100)
assert aptr.args[4].value == Buffer.READ | Buffer.WRITE
v = te.size_var("int32")
aptr = Ab.access_ptr("rw", offset=100 + 100 + v)
tvm.testing.assert_prim_expr_equal(aptr.args[2], 200 + v)
assert aptr.args[4].value == Buffer.READ | Buffer.WRITE
aptr = Ab.access_ptr("rw", offset=tvm.tir.call_extern("int32", "test_call", 100 + 100 + v))
tvm.testing.assert_prim_expr_equal(
aptr.args[2], tvm.tir.call_extern("int32", "test_call", 200 + v)
)
assert aptr.args[4].value == Buffer.READ | Buffer.WRITE
def test_buffer_access_ptr_extent():
m = te.size_var("m")
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((m, n), "float32")
aptr = Ab.access_ptr("rw")
assert tvm.ir.structural_equal(aptr.args[3], m * n)
aptr = Ab.access_ptr("rw", offset=100)
assert tvm.ir.structural_equal(aptr.args[3], m * n - 100)
Ab = tvm.tir.decl_buffer((m, n), "float32", strides=[n + 1, 1])
aptr = Ab.access_ptr("rw", offset=100)
assert tvm.ir.structural_equal(aptr.args[3], Ab.strides[0] * m - 100)
# Test extent from input params
aptr = Ab.access_ptr("rw", extent=200)
assert tvm.ir.structural_equal(aptr.args[3], 200)
aptr = Ab.access_ptr("rw", offset=100, extent=100)
assert tvm.ir.structural_equal(aptr.args[3], 100)
def test_buffer_vload():
m = te.size_var("m")
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((m, n), "float32", elem_offset=100)
load = Ab.vload([2, 3])
tvm.ir.assert_structural_equal(load.indices, [2, 3])
def test_buffer_offset_of():
m = te.size_var("m")
n = te.size_var("n")
Ab = tvm.tir.decl_buffer((m, n), "float32", elem_offset=100)
offset = Ab.offset_of([2, 3])
tvm.ir.assert_structural_equal(offset, [n * 2 + 103])
def test_buffer_vload_nullptr():
var = tvm.tir.Var("v", dtype="int32")
buf = tvm.tir.decl_buffer((1,), name="buf")
buf_load = tvm.tir.expr.BufferLoad(buffer=buf, indices=tvm.runtime.convert([0]))
buf_load_stmt = tvm.tir.stmt.Evaluate(buf_load)
for_loop = tvm.tir.stmt.For(
loop_var=var, kind=0, min_val=0, extent=tvm.tir.Cast("int32", buf_load), body=buf_load_stmt
)
buf_func = tvm.tir.PrimFunc(params={}, body=for_loop)
mod = tvm.IRModule({"main": buf_func})
# Trigger the nullptr-buffer bug via the lowering passes below
with pytest.raises(tvm.error.TVMError) as cm:
mod = tvm.transform.Sequential(
[
tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(),
tvm.tir.transform.CompactBufferAllocation(),
tvm.tir.transform.LowerOpaqueBlock(),
tvm.tir.transform.FlattenBuffer(),
]
)(mod)
assert "(n != nullptr) is false" in str(cm.execption)
def test_buffer_index_merge_mult_mod():
m = te.size_var("m")
n = te.size_var("n")
s = te.size_var("s")
k0 = te.size_var("k0")
k1 = te.size_var("k1")
A = tvm.tir.decl_buffer((m, n), "float32")
A_stride = tvm.tir.decl_buffer((m, n), "float32", strides=(s, 1))
def assert_simplified_equal(index_simplified, index_direct):
assert tvm.ir.structural_equal(
index_simplified, index_direct
), "index_simplified=%s, index_direct=%s" % (index_simplified, index_direct)
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
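# The cases below check that the simplifier can merge div/mod chains back
# into a flat index, using identities like (x // c) * c + x % c == x
# (floordiv/floormod) for the strides involved.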
# Test Case1
index_simplified = A_stride.offset_of(
(idxd(idxm(k0, k1), s), idxm(idxm(k0, k1), s) + idxd(k0, k1) * k1)
)
index_direct = A_stride.offset_of((0, k0))
assert_simplified_equal(index_simplified, index_direct)
# Test Case2
index_simplified = A.offset_of(
(idxd(idxm(k0, idxd(k1, s)), n), idxm(idxm(k0, idxd(k1, s)), n) + idxm(k0, k1))
)
index_direct = A.offset_of((0, idxm(k0, k1) + idxm(k0, idxd(k1, s))))
assert_simplified_equal(index_simplified, index_direct)
# Test Case3
index_simplified = A.offset_of(
(
idxd((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) + idxd(idxm(k0, idxd(k1, s)), n),
idxm((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) + idxm(idxm(k0, idxd(k1, s)), n),
)
)
index_direct = A.offset_of((0, k0))
assert_simplified_equal(index_simplified, index_direct)
# Test Case4 (not able to simplify)
index_simplified = A.offset_of(
(idxd(idxm(k0, idxd(k1, s)), n), idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1))
)
index_direct = A.offset_of(
(0, idxd(idxm(k0, idxd(k1, s)), n) * n + (idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1)))
)
assert_simplified_equal(index_simplified, index_direct)
# Test Case5
B = tvm.tir.decl_buffer((1, 14, 14, 1024))
i = te.size_var("i")
j = te.size_var("j")
k = te.size_var("k")
index_simplified1 = B.offset_of(
(
idxd(idxd(idxd((i * 50176 + j * 28672 + k), 1024), 14), 14),
idxm(idxd(idxd((i * 50176 + j * 28672 + k), 1024), 14), 14),
idxm(idxd((i * 50176 + j * 28672 + k), 1024), 14),
idxm((i * 50176 + j * 28672 + k), 1024),
)
)
index_simplified2 = B.offset_of(
(
idxd(idxd(i * 49 + j * 28 + idxd(k, 1024), 14), 14),
idxm(idxd(i * 49 + j * 28 + idxd(k, 1024), 14), 14),
idxm(i * 7 + idxd(k, 1024), 14),
idxm(k, 1024),
)
)
index_direct = B.offset_of((0, 0, 0, (i * 50176 + j * 28672 + k)))
assert_simplified_equal(index_simplified1, index_direct)
assert_simplified_equal(index_simplified2, index_direct)
@tvm.testing.requires_llvm
def test_buffer_broadcast():
m0, m1, m2 = te.size_var("m0"), te.size_var("m1"), te.size_var("m2")
n0, n1, n2 = te.size_var("n0"), te.size_var("n1"), te.size_var("n2")
o0, o1, o2 = te.size_var("o0"), te.size_var("o1"), te.size_var("o2")
A = te.placeholder((m0, m1, m2), name="A")
B = te.placeholder((n0, n1, n2), name="B")
C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name="C")
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
def check():
fadd = tvm.build(s, [A, B, C], target="llvm", name="bcast_add", binds={A: Ab, B: Bb})
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(2, 1, 1)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check()
@tvm.testing.requires_llvm
def test_buffer_broadcast_expr():
n0, m0, x = te.size_var("n0"), te.size_var("m0"), te.size_var("x")
n1, m1 = te.size_var("n1"), te.size_var("m1")
o0, o1 = te.size_var("o0"), te.size_var("o1")
A = te.placeholder((m0, n0), name="A")
B = te.placeholder((m1, n1), name="B")
C = te.compute((o0, o1 // x), lambda i, j: A[i, j] + B[i, j], name="C")
Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
Cc = tvm.tir.decl_buffer(C.shape, C.dtype, name="Cc", buffer_type="auto_broadcast")
s = te.create_schedule(C.op)
def check_stride():
fadd = tvm.build(
s, [A, B, C, o1, x], target="llvm", name="bcast_add", binds={A: Ab, B: Bb, C: Cc}
)
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev)
fadd(a, b, c, 4, 1)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
def check_no_stride():
fadd = tvm.build(
s, [A, B, C, o1, x], target="llvm", name="bcast_add", binds={A: Ab, B: Bb, C: Cc}
)
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(1, 4)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev)
fadd(a, b, c, 4, 1)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
def check_auto_bind():
# Let build bind buffers
fadd = tvm.build(s, [A, B, C, o1, x], target="llvm", name="bcast_add")
dev = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=(1, 4)).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev)
fadd(a, b, c, 4, 1)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_stride()
check_no_stride()
check_auto_bind()
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_constructor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
def test_expr_constructor():
x = tvm.tir.Var("xx", "float32")
assert isinstance(x, tvm.tir.Var)
assert x.name == "xx"
x = tvm.tir.Reduce(None, [1], [tvm.tir.IterVar((0, 1), "x", 2)], None, 0)
assert isinstance(x, tvm.tir.Reduce)
assert x.combiner == None
assert x.value_index == 0
x = tvm.tir.FloatImm("float32", 1.0)
assert isinstance(x, tvm.tir.FloatImm)
assert x.value == 1.0
assert x.dtype == "float32"
x = tvm.tir.IntImm("int64", 2)
assert isinstance(x, tvm.tir.IntImm)
assert x.value == 2
assert x.dtype == "int64"
x = tvm.tir.StringImm("xyza")
assert isinstance(x, tvm.tir.StringImm)
assert x.value == "xyza"
x = tvm.tir.Cast("float32", tvm.tir.IntImm("uint32", 1))
assert isinstance(x, tvm.tir.Cast)
assert x.dtype == "float32"
assert x.value.value == 1
a = tvm.tir.const(1.0, dtype="float32")
b = te.var("x", dtype="float32")
for cls in [
tvm.tir.Add,
tvm.tir.Sub,
tvm.tir.Mul,
tvm.tir.Div,
tvm.tir.Mod,
tvm.tir.Min,
tvm.tir.Max,
tvm.tir.LT,
tvm.tir.LE,
tvm.tir.GT,
tvm.tir.GE,
]:
x = cls(a, b)
assert isinstance(x, cls)
assert x.a == a
assert x.b.same_as(b)
a = tvm.runtime.convert(te.var("x") > 1)
b = tvm.runtime.convert(te.var("x") == 1)
for cls in [tvm.tir.And, tvm.tir.Or]:
x = cls(a, b)
assert isinstance(x, cls)
assert x.a == a
assert x.b.same_as(b)
x = tvm.tir.Not(a)
assert isinstance(x, tvm.tir.Not)
assert x.a == a
x = tvm.tir.Select(a, a, b)
assert isinstance(x, tvm.tir.Select)
assert x.true_value == a
assert x.false_value == b
assert x.condition == a
buffer_var = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("float32")))
buffer = tvm.tir.decl_buffer([16], "float32", data=buffer_var)
x = tvm.tir.BufferLoad(buffer, [1])
assert isinstance(x, tvm.tir.BufferLoad)
assert x.dtype == "float32"
assert x.buffer == buffer
assert x.buffer.data == buffer_var
assert list(x.indices) == [1]
x = tvm.tir.Ramp(1, 2, 10)
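# Ramp(base=1, stride=2, lanes=10) represents the vector [1, 3, 5, ..., 19]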
assert isinstance(x, tvm.tir.Ramp)
assert x.base.value == 1
assert x.stride.value == 2
assert x.lanes == 10
x = tvm.tir.Broadcast(a, 10)
assert isinstance(x, tvm.tir.Broadcast)
assert x.value == a
assert x.lanes == 10
x = tvm.tir.Shuffle([a], [0])
assert isinstance(x, tvm.tir.Shuffle)
assert x.vectors[0] == a
assert x.indices[0].value == 0
x = tvm.tir.Call("float32", "tir.call_extern", [tvm.tir.StringImm("xyz"), a])
assert isinstance(x, tvm.tir.Call)
assert x.dtype == "float32"
assert x.op.name == "tir.call_extern"
assert x.args[1] == a
v = te.var("aa")
x = tvm.tir.Let(v, 1, v)
assert x.var == v
assert x.value.value == 1
assert x.body == v
def test_stmt_constructor():
v = te.var("aa")
nop = tvm.tir.Evaluate(1)
x = tvm.tir.LetStmt(v, 1, tvm.tir.Evaluate(1))
assert isinstance(x, tvm.tir.LetStmt)
assert x.var == v
assert x.value.value == 1
assert isinstance(x.body, tvm.tir.Evaluate)
x = tvm.tir.AttrStmt(v == 1, "xx", 1, tvm.tir.Evaluate(1))
assert isinstance(x, tvm.tir.AttrStmt)
assert x.value.value == 1
x = tvm.tir.AssertStmt(tvm.tir.const(1, "uint1"), tvm.runtime.convert("hello"), nop)
assert isinstance(x, tvm.tir.AssertStmt)
assert x.body == nop
x = tvm.tir.For(te.var("x"), 0, 10, tvm.tir.ForKind.SERIAL, nop)
assert isinstance(x, tvm.tir.For)
assert x.min.value == 0
assert x.extent.value == 10
assert x.body == nop
buffer_var = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("uint1")))
buffer = tvm.tir.decl_buffer([16], "uint1", data=buffer_var)
x = tvm.tir.BufferStore(buffer, 1, [10])
assert isinstance(x, tvm.tir.BufferStore)
assert x.buffer == buffer
assert x.buffer.data == buffer_var
assert list(x.indices) == [10]
assert x.value.value == 1
buffer_var = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("float32")))
x = tvm.tir.Allocate(buffer_var, "float32", [10], tvm.tir.const(1, "uint1"), nop)
assert isinstance(x, tvm.tir.Allocate)
assert x.dtype == "float32"
assert x.buffer_var == buffer_var
assert x.body == nop
storage_scope = "global.texture"
buffer_var = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("float32"), storage_scope))
x = tvm.tir.Allocate(buffer_var, "float32", [10], tvm.tir.const(1, "uint1"), nop)
assert isinstance(x, tvm.tir.Allocate)
assert x.dtype == "float32"
assert x.buffer_var == buffer_var
assert x.buffer_var.type_annotation.storage_scope == storage_scope
assert x.body == nop
x = tvm.tir.AttrStmt(buffer_var, "xyz", 1, nop)
assert isinstance(x, tvm.tir.AttrStmt)
assert x.node == buffer_var
assert x.attr_key == "xyz"
assert x.body == nop
x = tvm.tir.IfThenElse(tvm.tir.const(1, "uint1"), tvm.tir.Evaluate(11), nop)
assert isinstance(x, tvm.tir.IfThenElse)
assert x.then_case.value.value == 11
assert x.else_case == nop
b = tvm.tir.decl_buffer((1, 2))
x = tvm.tir.Prefetch(b, [])
assert isinstance(x, tvm.tir.Prefetch)
def test_float_constructor_requires_float_dtype():
with pytest.raises(tvm.TVMError):
tvm.tir.FloatImm("int32", 1.0)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_data_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout and bijective-layout node"""
import tvm
from tvm import te
from tvm.topi.utils import get_const_tuple
def test_layout():
layout = tvm.tir.layout("NCHW16c")
assert layout is not None
assert isinstance(layout, tvm.tir.Layout)
assert layout.factor_of("c") == 16
assert layout.factor_of("C") == 16
assert layout.factor_of("N") == -1
assert layout.index_of("N") == 0
assert layout.index_of("C") == 1
assert layout.index_of("H") == 2
assert layout.index_of("W") == 3
assert layout.index_of("c") == 4
assert layout.index_of("O") == -1
assert "N" in layout
assert "C" in layout
assert "H" in layout
assert "W" in layout
assert "c" in layout
assert "O" not in layout
assert layout[0] == "N"
assert layout[1] == "C"
assert layout[2] == "H"
assert layout[3] == "W"
assert layout[4] == "c"
assert layout[-1] == "c"
def test_bilayout_convertible():
# not convertible
assert tvm.tir.bijective_layout("NCHW", "ABCD") is None
assert tvm.tir.bijective_layout("__undef__", "NCHW") is None
assert tvm.tir.bijective_layout("NCHW", "__undef__") is None
assert tvm.tir.bijective_layout("__undef__", "__undef__") is None
assert tvm.tir.bijective_layout("", "NCHW") is None
assert tvm.tir.bijective_layout("NCHW", "") is None
assert tvm.tir.bijective_layout("", "") is None
# convertible
assert tvm.tir.bijective_layout("NCHW", "NCHW16c") is not None
def test_bilayout_shape():
bilayout = tvm.tir.bijective_layout("NCHW", "NCHW16c")
assert isinstance(bilayout, tvm.tir.BijectiveLayout)
dst_shape = bilayout.forward_shape((1, 32, 7, 7))
assert get_const_tuple(dst_shape) == (1, 2, 7, 7, 16)
src_shape = bilayout.backward_shape(dst_shape)
assert get_const_tuple(src_shape) == (1, 32, 7, 7)
def test_bilayout_index():
bilayout = tvm.tir.bijective_layout("NCHW", "NCHW16c")
dst_index = bilayout.forward_index([0, 18, 6, 6])
assert get_const_tuple(dst_index) == (0, 1, 6, 6, 2)
src_index = bilayout.backward_index([0, 1, 6, 6, 2])
assert get_const_tuple(src_index) == (0, 18, 6, 6)
if __name__ == "__main__":
test_layout()
test_bilayout_convertible()
test_bilayout_shape()
test_bilayout_index()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_imm_values.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import random
import numpy as np
import tvm
import tvm.testing
import pytest
from tvm import tir
from tvm.script import tir as T
import pytest
@pytest.mark.parametrize(
"dtype, literals",
[
["int8", [-128, 0, 127]],
["uint8", [0, 255]],
["int32", [-2147483648, 2147483647]],
["uint32", [0, 4294967295]],
["int64", [-9223372036854775808, 9223372036854775807]],
["uint64", [0, 9223372036854775807]],
],
)
def test_tir_make_intimm(dtype, literals):
for l in literals:
imm = tir.const(l, dtype)
assert imm.value == l, imm
@pytest.mark.parametrize(
"dtype, literals",
[
["int8", [-129, 128]],
["uint8", [-1, 256]],
["int32", [-2147483650, 2147483648]],
["uint32", [-1, 4294967296]],
["uint64", [-1, 18446744073709551616]],
],
)
def test_tir_invalid_intimm(dtype, literals):
for l in literals:
with pytest.raises(tvm.TVMError):
tir.const(l, dtype)
@pytest.mark.parametrize(
"dtype, literals",
[
[
"uint64",
{
9223372036854775807: 9223372036854775807,
18446744073709551615: 18446744073709551615,
},
],
],
)
def test_tir_large_py_int_literals(dtype, literals):
"""
For large uint values, the LargeUIntImm intrinsic is used.
"""
for l in literals:
x = tir.const(l, dtype)
if isinstance(x, (tir.IntImm, tir.FloatImm)):
assert x.value == literals[l]
else:
# LargeUIntImm(low32, hi32)
assert (int(x.args[1]) << 32) + int(x.args[0]) == literals[l]
def test_tir_intimm_overflow():
assert int(tir.const(255, "uint8") + tir.const(1, "uint8")) == 0
assert int(tir.const(2**31 - 1, "int32") + tir.const(1, "int32")) == -(2**31)
assert int(tir.const(2**32 - 1, "uint32") + tir.const(1, "uint32")) == 0
assert int(tir.const(2**63 - 1, "int64") + tir.const(1, "int64")) == -(2**63)
assert int(tir.const(2**32, "uint64") * tir.const(2**32, "uint64")) == 0
# customized int types
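# int4 has range [-8, 7], so 7 + 1 wraps to -8; int40 wraps at 2**39 the same way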
assert int(tir.const(7, "int4") + tir.const(1, "int4")) == -8
assert int(tir.const(2**39 - 1, "int40") + tir.const(1, "int40")) == -(2**39)
def compare_float_value(value, expect, msg):
if math.isfinite(value):
assert np.abs(value - expect) < 1e-5, f"{value} vs {expect}, {msg}"
elif math.isnan(value):
assert math.isnan(expect), f"{value} vs {expect}, {msg}"
elif math.isinf(value):
assert math.isinf(expect), f"{value} vs {expect}, {msg}"
@pytest.mark.parametrize(
"dtype, literals",
[
["float16", [-65504.0, 3.14, 65504.0, np.inf, np.nan]],
["bfloat16", [-3.38953139e38, 3.38953139e38, 3.14]],
["float32", [np.finfo("float32").min, 3.14, np.finfo("float32").max, np.inf, np.nan]],
["float64", [np.finfo("float64").min, 3.14, np.finfo("float64").max, np.inf, np.nan]],
],
)
def test_tir_make_floatimm(dtype, literals):
for l in literals:
imm = tir.const(l, dtype)
compare_float_value(imm.value, l, "imm value should match feed value")
@pytest.mark.parametrize(
"dtype, literals",
[
["float16", [-65505.0, 65505.0]],
["float32", [-3.402e39, 3.402e39]],
],
)
def test_tir_invalid_floatimm(dtype, literals):
"""Currently only fp16 and fp32 have range check."""
for l in literals:
with pytest.raises(tvm.TVMError):
tir.const(l, dtype)
@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
@pytest.mark.parametrize("literal", [3.14, np.nan, np.inf])
def test_tir_special_floatimms(dtype, literal):
x = tir.const(literal, dtype)
compare_float_value(x.value, literal, "imm value should match feed value")
@tvm.testing.requires_llvm()
def test_tir_too_large_literal_f64():
# Behavior check: if literal f64 value is out of dtype range, the
# object is still constructed, and eval to infinity.
@T.prim_func
def imm_overflow_fp64() -> T.float64:
T.evaluate(T.ret(T.float64(1.7976e309), dtype="float64"))
f = tvm.build(imm_overflow_fp64, target="llvm")
assert math.isinf(f())
@pytest.mark.parametrize(
"literal, expect_dtype",
[
(256, "int32"),
(2147483647, "int32"),
(-2147483648, "int32"),
(2147483648, "int64"),
(-2147483649, "int64"),
(3.14159, "float32"),
(np.finfo("float32").min, "float32"),
(np.finfo("float32").max, "float32"),
(-3.402e39, "float64"),
(3.402e39, "float64"),
],
)
def test_tir_const_auto_dtype(literal, expect_dtype):
x = tir.const(literal, dtype=None)
assert x.dtype == expect_dtype
assert x.value == literal
def check_tir_const_fold(
dtype, foldf, calcf, x_range=None, y_range=None, expect=None, skip_overflow=False
):
"""Helper to check constant folding behavior
Parameters
----------
dtype: str
Datatype of constants
foldf: (x, y) -> z
Folding function to call
calcf: (x, y) -> z
Compiled calculation function to call
x_range: Union[int, float, tuple]
Single value or value range [min, max]
y_range: Union[int, float, tuple]
Single value or value range [min, max]
expect: Union[int, float]
Expected calculation result
skip_overflow: bool
Skip the assertion if overflow occurs
"""
seed = random.randint(0, 2147483648)
np.random.seed(seed)
ninfo = np.finfo(dtype) if dtype.startswith("float") else np.iinfo(dtype)
if x_range is None:
x_range = (ninfo.min, ninfo.max)
if isinstance(x_range, (int, float)):
x = x_range
elif dtype.startswith("int") or dtype.startswith("uint"):
x = np.random.randint(x_range[0], x_range[1] + 1, dtype=dtype)
else:
x = np.random.uniform(x_range[0], x_range[1])
if y_range is None:
y_range = (ninfo.min, ninfo.max)
if isinstance(y_range, (int, float)):
y = y_range
elif dtype.startswith("int") or dtype.startswith("uint"):
y = np.random.randint(y_range[0], y_range[1] + 1, dtype=dtype)
else:
y = np.random.uniform(y_range[0], y_range[1])
if skip_overflow:
py_res = foldf(x, y)
if isinstance(py_res, (tir.IntImm, tir.FloatImm)):
py_res = py_res.value
if not (ninfo.min <= py_res <= ninfo.max):
# If the result overflows, the arithmetic is not well-defined,
# so we intentionally do not fail the test.
return
fold_res = foldf(tir.const(x, dtype), tir.const(y, dtype))
calc_res = calcf(x, y)
flaky_msg = (
f"{dtype} ({x}, {y}, {expect}) const folding check failed.\n"
+ "This test is intentionally non-deterministic, "
+ f"if it fails please report it in github issue together with this seed {seed}\n"
)
if dtype.startswith("float"):
compare_float_value(calc_res, fold_res.value, flaky_msg)
if expect:
compare_float_value(expect, calc_res, flaky_msg)
else:
assert calc_res == fold_res.value, flaky_msg
if expect:
assert expect == calc_res, flaky_msg
@tvm.testing.requires_llvm()
def test_tir_floatimm_const_fold():
"""Behavior check: folding fp32 match platform f32 arithmetic"""
@T.prim_func
def float_imm_multiply(x: T.float32, y: T.float32, z: T.Buffer[(), "float32"]):
z[()] = x * y
@T.prim_func
def float_imm_add(x: T.float32, y: T.float32, z: T.Buffer[(), "float32"]):
z[()] = x + y
@T.prim_func
def float_imm_sub(x: T.float32, y: T.float32, z: T.Buffer[(), "float32"]):
z[()] = x - y
@T.prim_func
def float_imm_div(x: T.float32, y: T.float32, z: T.Buffer[(), "float32"]):
z[()] = x / y
def __wrap_build(f):
lib = tvm.build(f, target="llvm")
z = tvm.nd.array(np.zeros([]).astype("float32"))
def _func(x, y):
lib(x, y, z)
return z.numpy()
return _func
fmul = __wrap_build(float_imm_multiply)
fadd = __wrap_build(float_imm_add)
fsub = __wrap_build(float_imm_sub)
fdiv = __wrap_build(float_imm_div)
# overflow
check_tir_const_fold("float32", lambda x, y: x * y, fmul, 3.0e30, 3.0e30, np.inf)
check_tir_const_fold("float32", lambda x, y: x * y, fmul, 3.0e30, -3.0e30, -np.inf)
check_tir_const_fold("float32", lambda x, y: x / y, fdiv, 3.0e30, 3.0e-30, np.inf)
# divide by zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("float32", lambda x, y: x / y, fdiv, 1.0, 0.0)
# nan and inf
check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, np.nan, np.nan)
check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, np.inf, np.inf)
check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, -np.inf, -np.inf)
# randomized check
check_tir_const_fold("float32", lambda x, y: x * y, fmul)
check_tir_const_fold("float32", lambda x, y: x + y, fadd)
check_tir_const_fold("float32", lambda x, y: x - y, fsub)
check_tir_const_fold(
"float32", lambda x, y: x / y, fdiv, y_range=(0.01, np.finfo("float32").max)
)
@tvm.testing.requires_llvm()
def test_tir_int8_const_fold():
"""Behavior check: folding i8 operation match platform i8 arithmetic"""
@T.prim_func
def imm_multiply(x: T.int8, y: T.int8) -> T.int8:
T.evaluate(T.ret(x * y, dtype="int8"))
@T.prim_func
def imm_add(x: T.int8, y: T.int8) -> T.int8:
T.evaluate(T.ret(x + y, dtype="int8"))
@T.prim_func
def imm_sub(x: T.int8, y: T.int8) -> T.int8:
T.evaluate(T.ret(x - y, dtype="int8"))
@T.prim_func
def imm_truncdiv(x: T.int8, y: T.int8) -> T.int8:
T.evaluate(T.ret(T.truncdiv(x, y), dtype="int8"))
@T.prim_func
def imm_floordiv(x: T.int8, y: T.int8) -> T.int8:
T.evaluate(T.ret(T.floordiv(x, y), dtype="int8"))
fmul = tvm.build(imm_multiply, target="llvm")
fadd = tvm.build(imm_add, target="llvm")
fsub = tvm.build(imm_sub, target="llvm")
ffloordiv = tvm.build(imm_floordiv, target="llvm")
ftruncdiv = tvm.build(imm_truncdiv, target="llvm")
# overflow
check_tir_const_fold("int8", lambda x, y: x + y, fadd, 127, 1, -128)
check_tir_const_fold("int8", lambda x, y: x * y, fmul, 127, 127, 1)
# divide by zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int8", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0)
# i8 mod folding is not implemented
assert not isinstance(tir.floormod(tir.const(7, "int8"), tir.const(3, "int8")), tir.IntImm)
assert not isinstance(tir.truncmod(tir.const(7, "int8"), tir.const(3, "int8")), tir.IntImm)
# randomized check
check_tir_const_fold("int8", lambda x, y: x * y, fmul)
check_tir_const_fold("int8", lambda x, y: x + y, fadd)
check_tir_const_fold("int8", lambda x, y: x - y, fsub)
check_tir_const_fold(
"int8", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("int8").max)
)
check_tir_const_fold(
"int8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("int8").max)
)
@tvm.testing.requires_llvm()
def test_tir_uint8_const_fold():
"""Behavior check: folding u8 operation match platform u8 arithmetic"""
@T.prim_func
def imm_multiply(x: T.uint8, y: T.uint8) -> T.uint8:
T.evaluate(T.ret(x * y, dtype="uint8"))
@T.prim_func
def imm_add(x: T.uint8, y: T.uint8) -> T.uint8:
T.evaluate(T.ret(x + y, dtype="uint8"))
@T.prim_func
def imm_sub(x: T.uint8, y: T.uint8) -> T.uint8:
T.evaluate(T.ret(x - y, dtype="uint8"))
@T.prim_func
def imm_truncdiv(x: T.uint8, y: T.uint8) -> T.uint8:
T.evaluate(T.ret(T.truncdiv(x, y), dtype="uint8"))
@T.prim_func
def imm_floordiv(x: T.uint8, y: T.uint8) -> T.uint8:
T.evaluate(T.ret(T.floordiv(x, y), dtype="uint8"))
fmul = tvm.build(imm_multiply, target="llvm")
fadd = tvm.build(imm_add, target="llvm")
fsub = tvm.build(imm_sub, target="llvm")
ffloordiv = tvm.build(imm_floordiv, target="llvm")
ftruncdiv = tvm.build(imm_truncdiv, target="llvm")
# overflow
check_tir_const_fold("uint8", lambda x, y: x + y, fadd, 255, 1, 0)
# subtraction underflow below zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("uint8", lambda x, y: x - y, fsub, 0, 10)
# divide by zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("uint8", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("uint8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0)
# u8 mod folding is not implemented
assert not isinstance(tir.floormod(tir.const(7, "uint8"), tir.const(3, "uint8")), tir.IntImm)
assert not isinstance(tir.truncmod(tir.const(7, "uint8"), tir.const(3, "uint8")), tir.IntImm)
# randomized check
check_tir_const_fold("uint8", lambda x, y: x * y, fmul)
check_tir_const_fold("uint8", lambda x, y: x + y, fadd)
check_tir_const_fold("uint8", lambda x, y: x - y, fsub)
check_tir_const_fold(
"uint8", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("uint8").max)
)
check_tir_const_fold(
"uint8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("uint8").max)
)
@tvm.testing.requires_llvm()
def test_tir_int32_const_fold():
"""Behavior check: folding i32 operation match platform i32 arithmetic"""
@T.prim_func
def imm_multiply(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(x * y, dtype="int32"))
@T.prim_func
def imm_add(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(x + y, dtype="int32"))
@T.prim_func
def imm_sub(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(x - y, dtype="int32"))
@T.prim_func
def imm_truncdiv(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(T.truncdiv(x, y), dtype="int32"))
@T.prim_func
def imm_truncmod(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(T.truncmod(x, y), dtype="int32"))
@T.prim_func
def imm_floordiv(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(T.floordiv(x, y), dtype="int32"))
@T.prim_func
def imm_floormod(x: T.int32, y: T.int32) -> T.int32:
T.evaluate(T.ret(T.floormod(x, y), dtype="int32"))
fmul = tvm.build(imm_multiply, target="llvm")
fadd = tvm.build(imm_add, target="llvm")
fsub = tvm.build(imm_sub, target="llvm")
ffloordiv = tvm.build(imm_floordiv, target="llvm")
ffloormod = tvm.build(imm_floormod, target="llvm")
ftruncdiv = tvm.build(imm_truncdiv, target="llvm")
ftruncmod = tvm.build(imm_truncmod, target="llvm")
# i32 overflow is not specified, only check for range
assert -(2**31) <= int(tir.const(2**31 - 1, "int32") + tir.const(1, "int32")) < 2**31
assert -(2**31) <= int(tir.const(-(2**31), "int32") - tir.const(1, "int32")) < 2**31
# divide by zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int32", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int32", lambda x, y: tir.floormod(x, y), ffloormod, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("int32", lambda x, y: tir.truncmod(x, y), ftruncmod, 1, 0)
# randomized check
check_tir_const_fold("int32", lambda x, y: x * y, fmul, skip_overflow=True)
check_tir_const_fold("int32", lambda x, y: x + y, fadd, skip_overflow=True)
check_tir_const_fold("int32", lambda x, y: x - y, fsub, skip_overflow=True)
check_tir_const_fold(
"int32",
lambda x, y: tir.floordiv(x, y),
ffloordiv,
y_range=(1, np.iinfo("int32").max),
skip_overflow=True,
)
check_tir_const_fold(
"int32",
lambda x, y: tir.truncdiv(x, y),
ftruncdiv,
y_range=(1, np.iinfo("int32").max),
skip_overflow=True,
)
check_tir_const_fold(
"int32",
lambda x, y: tir.floormod(x, y),
ffloormod,
y_range=(1, np.iinfo("int32").max),
skip_overflow=False,
)
check_tir_const_fold(
"int32",
lambda x, y: tir.truncmod(x, y),
ftruncmod,
y_range=(1, np.iinfo("int32").max),
skip_overflow=False,
)
@tvm.testing.requires_llvm()
def test_tir_uint32_const_fold():
"""Behavior check: folding u32 operation match platform u32 arithmetic"""
@T.prim_func
def imm_multiply(x: T.uint32, y: T.uint32) -> T.uint32:
T.evaluate(T.ret(x * y, dtype="uint32"))
@T.prim_func
def imm_add(x: T.uint32, y: T.uint32) -> T.uint32:
T.evaluate(T.ret(x + y, dtype="uint32"))
@T.prim_func
def imm_sub(x: T.uint32, y: T.uint32) -> T.uint32:
T.evaluate(T.ret(x - y, dtype="uint32"))
@T.prim_func
def imm_truncdiv(x: T.uint32, y: T.uint32) -> T.uint32:
T.evaluate(T.ret(T.truncdiv(x, y), dtype="uint32"))
@T.prim_func
def imm_floordiv(x: T.uint32, y: T.uint32) -> T.uint32:
T.evaluate(T.ret(T.floordiv(x, y), dtype="uint32"))
fmul = tvm.build(imm_multiply, target="llvm")
fadd = tvm.build(imm_add, target="llvm")
fsub = tvm.build(imm_sub, target="llvm")
ffloordiv = tvm.build(imm_floordiv, target="llvm")
ftruncdiv = tvm.build(imm_truncdiv, target="llvm")
# u32 overflow is not specified, only check for range
assert 0 <= int(tir.const(2**32 - 1, "uint32") + tir.const(1, "uint32")) < 2**32
# divide by zero
with pytest.raises(tvm.TVMError):
check_tir_const_fold("uint32", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0)
with pytest.raises(tvm.TVMError):
check_tir_const_fold("uint32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0)
# u32 mod folding is not implemented
assert not isinstance(tir.floormod(tir.const(7, "uint32"), tir.const(3, "uint32")), tir.IntImm)
assert not isinstance(tir.truncmod(tir.const(7, "uint32"), tir.const(3, "uint32")), tir.IntImm)
# randomized check
check_tir_const_fold("uint32", lambda x, y: x * y, fmul, skip_overflow=True)
check_tir_const_fold("uint32", lambda x, y: x + y, fadd, skip_overflow=True)
check_tir_const_fold("uint32", lambda x, y: x - y, fsub, skip_overflow=True)
check_tir_const_fold(
"uint32",
lambda x, y: tir.floordiv(x, y),
ffloordiv,
y_range=(1, np.iinfo("uint32").max),
skip_overflow=False,
)
check_tir_const_fold(
"uint32",
lambda x, y: tir.truncdiv(x, y),
ftruncdiv,
y_range=(1, np.iinfo("uint32").max),
skip_overflow=False,
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te, tir
from tvm import topi
from tvm.contrib import utils, clang
from tvm.script import tir as T
import numpy as np
import ctypes
import math
import pytest
def test_nearbyint():
m = te.var("m")
A = te.placeholder((m,), name="A")
A_rounded = te.compute((m,), lambda *i: tvm.tir.nearbyint(A(*i)), name="A")
s = te.create_schedule(A_rounded.op)
f = tvm.build(s, [A, A_rounded], "llvm")
dev = tvm.cpu(0)
n = 10
a = tvm.nd.array(np.random.uniform(high=100, size=n).astype(A.dtype), dev)
a_rounded = tvm.nd.array(np.random.uniform(size=n).astype(A_rounded.dtype), dev)
f(a, a_rounded)
# Note that numpy's rint rounds to the nearest integer, with ties
# (halfway cases such as 1.5 and 2.5) broken by rounding to even,
# so both 1.5 and 2.5 round to 2. This is also the default rounding
# mode in libc. However, one can set a different rounding mode, and
# in that case the numpy result might differ.
tvm.testing.assert_allclose(a_rounded.numpy(), np.rint(a.numpy()))
def test_round_intrinsics_on_int():
i = tvm.te.var("i", "int32")
for op in [tvm.tir.round, tvm.tir.trunc, tvm.tir.ceil, tvm.tir.floor, tvm.tir.nearbyint]:
assert op(tvm.tir.const(10, "int32")).value == 10
assert op(tvm.tir.const(True, "bool")).value == True
assert op(i).same_as(i)
assert tvm.tir.isnan(tvm.tir.const(10, "int32")).value == False
def test_unary_intrin():
test_funcs = [
(tvm.tir.exp10, lambda x: np.power(10, x)),
(tvm.tir.log2, lambda x: np.log2(x)),
(tvm.tir.log10, lambda x: np.log10(x)),
(tvm.tir.sinh, lambda x: np.sinh(x)),
(tvm.tir.cosh, lambda x: np.cosh(x)),
(tvm.tir.log1p, lambda x: np.log1p(x)),
(tvm.tir.asin, lambda x: np.arcsin(x)),
(tvm.tir.acos, lambda x: np.arccos(x)),
(tvm.tir.atan, lambda x: np.arctan(x)),
(tvm.tir.asinh, lambda x: np.arcsinh(x)),
(tvm.tir.acosh, lambda x: np.arccosh(x)),
(tvm.tir.atanh, lambda x: np.arctanh(x)),
]
def run_test(tvm_intrin, np_func):
m = te.var("m")
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda *i: tvm_intrin(A(*i)), name="B")
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
dev = tvm.cpu(0)
n = 10
a = tvm.nd.array(np.random.uniform(0.1, 0.5, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-5, rtol=1e-5)
for func in test_funcs:
run_test(*func)
def test_binary_intrin():
test_funcs = [
(tvm.tir.atan2, lambda x1, x2: np.arctan2(x1, x2)),
(tvm.tir.nextafter, lambda x1, x2: np.nextafter(x1, x2)),
(tvm.tir.copysign, lambda x1, x2: np.copysign(x1, x2)),
(tvm.tir.hypot, lambda x1, x2: np.hypot(x1, x2)),
]
def run_test(tvm_intrin, np_func):
m = te.var("m")
A = te.placeholder((m,), name="A")
B = te.placeholder((m,), name="B")
C = te.compute((m,), lambda *i: tvm_intrin(A(*i), B(*i)), name="C")
s = te.create_schedule(C.op)
f = tvm.build(s, [A, B, C], "llvm")
dev = tvm.cpu(0)
n = 10
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np_func(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)
for func in test_funcs:
run_test(*func)
def test_ldexp():
m = te.var("m")
A = te.placeholder((m,), name="A")
B = te.placeholder((m,), name="B", dtype="int32")
C = te.compute((m,), lambda *i: tvm.tir.ldexp(A(*i), B(*i)), name="C")
s = te.create_schedule(C.op)
f = tvm.build(s, [A, B, C], "llvm")
dev = tvm.cpu(0)
n = 10
a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.randint(0, 5, size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.ldexp(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)
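# ldexp scales by a power of two: ldexp(x, n) == x * 2**n, e.g.
# np.ldexp(0.75, 3) == 6.0, which is the reference the TVM intrinsic is
# checked against here.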
dtype = tvm.testing.parameter("int32", "int64")
@tvm.testing.parametrize_targets("llvm", "vulkan -from_device=0")
def test_clz(target, dev, dtype):
target = tvm.target.Target(target)
if (
target.kind.name == "vulkan"
and dtype == "int64"
and not target.attrs.get("supports_int64", False)
):
pytest.xfail("Vulkan target does not support Int64 types")
def clz_np(x, dtype):
ceil_log2 = np.ceil(np.log2(x)).astype(dtype)
bits = int(dtype[-2:])
clz = bits - ceil_log2
clz[np.bitwise_and(x, x - 1) == 0] -= 1
return clz
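# e.g. for x = 8 (0b1000) with dtype "int32": ceil(log2(8)) = 3, and since 8
# is a power of two the correction branch subtracts one more, giving
# 32 - 3 - 1 = 28 leading zeros.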
m = te.var("m")
A = te.placeholder((m,), name="A", dtype=dtype)
B = te.compute((m,), lambda *i: tvm.tir.clz(A(*i)), name="B")
s = te.create_schedule(B.op)
if target.kind.name == "vulkan":
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], target)
n = 10
highs = [10, 100, 1000, 10000, 100000, 1000000]
if dtype == "int64":
highs.append((1 << 63) - 1)
for high in highs:
a_np = np.random.randint(1, high=high, size=(n,), dtype=dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros((n,)).astype("int32"), dev)
f(a, b)
ref = clz_np(a_np, dtype)
np.testing.assert_equal(b.numpy(), ref)
@tvm.script.ir_module
class Module:
@T.prim_func
def test_tir_fma(A: T.handle, B: T.handle, C: T.handle, d: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "test_fma", "tir.noalias": True})
n = T.var("int32")
stride = T.var("int32")
stride_1 = T.var("int32")
stride_2 = T.var("int32")
stride_3 = T.var("int32")
A_1 = T.match_buffer(
A,
[n],
strides=[stride],
elem_offset=0,
align=64,
offset_factor=1,
buffer_type="auto",
)
B_1 = T.match_buffer(
B,
[n],
strides=[stride_1],
elem_offset=0,
align=64,
offset_factor=1,
buffer_type="auto",
)
C_1 = T.match_buffer(
C,
[n],
strides=[stride_2],
elem_offset=0,
align=64,
offset_factor=1,
buffer_type="auto",
)
d_1 = T.match_buffer(
d,
[n],
strides=[stride_3],
elem_offset=0,
align=64,
offset_factor=1,
buffer_type="auto",
)
# body
for i in T.serial(0, n):
d_1[(i * stride_3)] = (A_1[(i * stride)] * B_1[(i * stride_1)]) + C_1[(i * stride_2)]
def test_fma():
opt = tvm.transform.Sequential(
[
tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm"))),
tvm.tir.transform.LowerIntrin(),
]
)
mod = opt(Module)
assert mod["test_tir_fma"].body.body.value.op.name == "tir.call_llvm_pure_intrin"
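# A sketch of what the pass does here: on an LLVM target, LowerIntrin rewrites
# the a * b + c pattern into a single fused multiply-add call (presumably
# llvm.fmuladd), which surfaces in TIR as tir.call_llvm_pure_intrin, the op
# name inspected by the assertion above.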
if __name__ == "__main__":
# test_clz is parametrized over (target, dev, dtype), so dispatch through the
# pytest-based entry point instead of calling the tests directly.
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ir_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import tvm.testing
from tvm.topi.math import cast
def test_for():
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
A = ib.allocate("float32", n, name="A", scope="global")
with ib.for_range(0, n, name="i") as i:
A[i] = A[i] + 1
with ib.for_range(0, 10, name="j") as j:
A[j] = A[j] + 2
body = ib.get()
assert isinstance(body, tvm.tir.Allocate)
body = body.body
assert isinstance(body, tvm.tir.For)
body = body.body
assert isinstance(body, tvm.tir.SeqStmt)
assert isinstance(body[1], tvm.tir.For)
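# The asserts above walk the nesting the builder produced:
# Allocate(A) -> For(i) -> SeqStmt([A[i] = A[i] + 1, For(j) -> A[j] = A[j] + 2])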
def test_if():
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
A = ib.pointer("float32", name="A")
tmod = tvm.tir.truncmod
with ib.for_range(0, n, name="i") as i:
with ib.if_scope(tmod(i, 2) == 0):
A[i] = A[i] + 1
with ib.else_scope():
A[0] = A[i] + 2
body = ib.get()
assert A == A
assert isinstance(body, tvm.tir.For)
body = body.body
assert isinstance(body, tvm.tir.IfThenElse)
assert isinstance(body.condition, tvm.tir.EQ)
assert isinstance(body.then_case.indices[0], tvm.tir.Var)
assert list(body.else_case.indices) == [0]
def test_prefetch():
A = tvm.tir.decl_buffer((10, 20), name="A")
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
with ib.for_range(0, n, name="i") as i:
ib.emit(
tvm.tir.Prefetch(
A, [tvm.ir.Range.from_min_extent(i + 1, 2), tvm.ir.Range.from_min_extent(0, 20)]
)
)
body = ib.get()
assert body.body.bounds[0].extent.value == 2
def test_cpu():
n = 1024
dtype = "float32"
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
def test_device_ir(A, B, C):
n = A.shape[0]
max_threads = 8
ib = tvm.tir.ir_builder.create()
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
with ib.for_range(0, n, name="i") as i:
Cptr[i] = Aptr[i] + Bptr[i]
body = ib.get()
return body
C = te.extern(
A.shape,
[A, B],
lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
name="vector_add",
dtype=dtype,
)
s = te.create_schedule(C.op)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
# build and invoke the kernel.
fadd = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_target("llvm")
@tvm.testing.requires_gpu
def test_gpu():
n = te.size_var("n")
dtype = "float32"
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
idxd = tvm.tir.indexdiv
def test_device_ir(A, B, C):
n = A.shape[0]
max_threads = 32
ib = tvm.tir.ir_builder.create()
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", idxd(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
idx = bx.var * max_threads + tx.var
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
with ib.if_scope(ib.likely(idx < n)):
Cptr[idx] = Aptr[idx] + Bptr[idx]
body = ib.get()
return body
C = te.extern(
A.shape,
[A, B],
lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
name="vector_add",
dtype=dtype,
)
s = te.create_schedule(C.op)
bounds = tvm.te.schedule.InferBound(s)
stmt = tvm.te.schedule.ScheduleOps(s, bounds)
def check_target(target):
n = 1024
if not tvm.testing.device_enabled(target):
return
# build and invoke the kernel.
fadd = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
# launch the kernel.
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
check_target("opencl")
check_target("cuda")
def test_while_vectorize():
"""Test while loop + vectorized inner loop"""
n = 64
num_iter = 10
def test_ir(A, B, C):
ib = tvm.tir.ir_builder.create()
n = C.shape[0]
A = ib.buffer_ptr(A)
B = ib.buffer_ptr(B)
C = ib.buffer_ptr(C)
i = ib.allocate("int32", (1,), name="i", scope="local")
i[0] = 0
with ib.for_range(0, n) as j:
C[j] = 0.0
with ib.while_loop(i[0] < num_iter):
with ib.for_range(0, n, kind="vectorize") as j:
C[j] += A[j] + B[j]
i[0] += 1
return ib.get()
def check_target(target, ir):
dtype = "float32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.placeholder((n,), name="B", dtype=dtype)
C = te.extern(
(n,),
[A, B],
lambda ins, outs: ir(ins[0], ins[1], outs[0]),
name="while_vectorize",
dtype=dtype,
)
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
a_np = np.random.uniform(size=n).astype(A.dtype)
b_np = np.random.uniform(size=n).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
func(a, b, c)
ref = num_iter * (a_np + b_np)
tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)
check_target("llvm", test_ir)
def test_while_collatz():
"""Test while loop + if"""
def collatz_ref(n):
a = n
i = 0
while a > 1:
if a % 2 == 1:
a = 3 * a + 1
else:
a = a >> 1
i += 1
return i
def collatz(ib, n, C):
i = ib.allocate("int32", (1,), name="i", scope="local")
a = ib.allocate("int32", (1,), name="a", scope="local")
i[0] = 0
a[0] = n
with ib.while_loop(a[0] > 1):
with ib.if_scope(tvm.tir.floormod(a[0], 2) == 1):
a[0] = 3 * a[0] + 1
with ib.else_scope():
a[0] = a[0] >> 1
i[0] += 1
C[n] = i[0]
def collatz_ir_cpu(C):
ib = tvm.tir.ir_builder.create()
n = C.shape[0]
C = ib.buffer_ptr(C)
with ib.for_range(0, n, name="i", kind="parallel") as i:
collatz(ib, i, C)
body = ib.get()
return body
n = 30
def check_target(target, ir):
C = te.extern(
(n,),
[],
lambda ins, outs: ir(outs[0]),
name="collatz",
dtype="int32",
)
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [C], target)
dev = tvm.device(target, 0)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
func(c)
ref = np.array([collatz_ref(i) for i in range(n)])
tvm.testing.assert_allclose(c.numpy(), ref)
check_target("llvm", collatz_ir_cpu)
def test_while_mandel():
n = 160
shape = (n * 2, n)
t = 300
def mandel_ref():
def complex_sqr(z):
return np.array([z[0] ** 2 - z[1] ** 2, z[1] * z[0] * 2])
pixels = np.zeros(shape)
for i in range(pixels.shape[0]):
for j in range(pixels.shape[1]):
c = np.array([-0.8, np.cos(t) * 0.2])
z = np.array([i / n - 1, j / n - 0.5]) * 2
iterations = 0
while np.linalg.norm(z) < 20 and iterations < 50:
z = complex_sqr(z) + c
iterations += 1
pixels[i, j] = 1 - iterations * 0.02
return pixels
def mandel(ib, i, j, pixels):
z = ib.allocate("float32", (2,), name="z", scope="local")
tmp = ib.allocate("float32", (1,), name="tmp", scope="local")
iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
z[0] = (i / float(n) - 1) * 2
z[1] = (j / float(n) - 0.5) * 2
iterations[0] = 0
c = [-0.8, float(np.cos(t)) * 0.2]
def norm(z):
return tvm.tir.sqrt(z[0] * z[0] + z[1] * z[1])
with ib.while_loop(tvm.tir.all(norm(z) < 20, iterations[0] < 50)):
tmp[0] = z[0]
z[0] = z[0] * z[0] - z[1] * z[1] + c[0]
z[1] = z[1] * tmp[0] * 2 + c[1]
iterations[0] += 1
pixels[i, j] = 1 - iterations[0] * 0.02
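# tmp[0] saves the pre-update real part so that the imaginary update
# z[1] = 2 * re * im + c[1] uses the old value, matching complex_sqr in
# mandel_ref above.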
def mandel_ir_cpu(C):
ib = tvm.tir.ir_builder.create()
ny = C.shape[0]
nx = C.shape[1]
C = ib.buffer_ptr(C)
with ib.for_range(0, ny, name="i", kind="parallel") as i:
with ib.for_range(0, nx, name="j") as j:
mandel(ib, i, j, C)
body = ib.get()
return body
def mandel_ir_gpu(C):
ib = tvm.tir.ir_builder.create()
ny = C.shape[0]
nx = C.shape[1]
C = ib.buffer_ptr(C)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
by = te.thread_axis("blockIdx.y")
ty = te.thread_axis("threadIdx.y")
max_threads = 16
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(nx + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
ib.scope_attr(by, "thread_extent", tvm.tir.indexdiv(ny + max_threads - 1, max_threads))
ib.scope_attr(ty, "thread_extent", max_threads)
tidx = bx * max_threads + tx
tidy = by * max_threads + ty
with ib.if_scope(tvm.tir.all(tidx < nx, tidy < ny)):
mandel(ib, tidy, tidx, C)
body = ib.get()
return body
ref = mandel_ref()
def check_target(target, ir):
if not tvm.testing.device_enabled(target):
return
C = te.extern(
shape,
[],
lambda ins, outs: ir(outs[0]),
name="mandel_ir",
dtype="float32",
)
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [C], target)
dev = tvm.device(target, 0)
c = tvm.nd.array(np.zeros(shape, dtype=C.dtype), dev)
func(c)
tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)
check_target("llvm", mandel_ir_cpu)
check_target("npvtx", mandel_ir_gpu)
check_target("cuda", mandel_ir_gpu)
check_target("vulkan", mandel_ir_gpu)
def test_while_binary_search():
def binary_search(ib, n, i, Aptr, Bptr, Cptr):
lo = ib.allocate("int32", (1,), name="lo", scope="local")
hi = ib.allocate("int32", (1,), name="hi", scope="local")
lo[0] = 0
hi[0] = n
v = Bptr[i]
with ib.while_loop(lo[0] < hi[0]):
mid = lo[0] + ((hi[0] - lo[0]) >> 1)
with ib.if_scope(Aptr[mid] < v):
lo[0] = mid + 1
with ib.else_scope():
hi[0] = mid
Cptr[i] = lo[0]
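# The midpoint lo + ((hi - lo) >> 1) is the overflow-safe form of
# (lo + hi) / 2 for int32 bounds.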
def searchsorted_ir_cpu(A, B, C, n):
ib = tvm.tir.ir_builder.create()
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
with ib.for_range(0, n, name="i", kind="parallel") as i:
binary_search(ib, n, i, Aptr, Bptr, Cptr)
body = ib.get()
return body
def searchsorted_ir_gpu(A, B, C, n):
ib = tvm.tir.ir_builder.create()
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
max_threads = 32
ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
ib.scope_attr(tx, "thread_extent", max_threads)
tid = bx * max_threads + tx
with ib.if_scope(tid < n):
binary_search(ib, n, tid, Aptr, Bptr, Cptr)
body = ib.get()
return body
n = 1024
dtype = "float32"
A = te.placeholder((n,), name="A", dtype=dtype)
B = te.placeholder((n,), name="B", dtype=dtype)
def check_target(target, ir):
if not tvm.testing.device_enabled(target):
return
C = te.extern(
A.shape,
[A, B],
lambda ins, outs: ir(ins[0], ins[1], outs[0], n),
name="searchsorted_ir",
dtype="int32",
)
s = te.create_schedule(C.op)
with tvm.transform.PassContext(opt_level=3):
func = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
a_np = np.random.uniform(size=n).astype(A.dtype)
b_np = np.random.uniform(size=n).astype(B.dtype)
a_np = np.sort(a_np)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
func(a, b, c)
ref = np.searchsorted(a_np, b_np)
tvm.testing.assert_allclose(c.numpy(), ref)
check_target("llvm", searchsorted_ir_cpu)
check_target("cuda", searchsorted_ir_gpu)
check_target("nvptx", searchsorted_ir_gpu)
check_target("vulkan", searchsorted_ir_gpu)
@tvm.testing.requires_gpu
def test_dyn_shared():
n = te.size_var("n")
dtype = "float32"
A = te.placeholder((n,), name="A")
def test_device_ir(A, B):
n = A.shape[0]
ib = tvm.tir.ir_builder.create()
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", n)
temp = ib.allocate(dtype, (n,), scope="shared.dyn") # n is symbolic size
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
temp[tx] = Aptr[tx]
depth = tvm.tir.log2(cast(n, "float32"))
with ib.for_range(0, cast(tvm.tir.ceil(depth), n.dtype)) as i:
ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
d = n >> (i + 1)
with ib.if_scope(tx < d):
temp[tx] += temp[tx + d]
Bptr[0] = temp[0]
return ib.get()
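# The loop above is a standard tree reduction: each step halves the number of
# active threads, so after ceil(log2(n)) steps temp[0] holds the full sum.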
B = te.extern(
(1,),
[A],
lambda ins, outs: test_device_ir(ins[0], outs[0]),
name="reduce",
dtype=dtype,
)
s = te.create_schedule(B.op)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
freduce = tvm.build(s, [A, B], target)
dev = tvm.device(target, 0)
for n in [512, 1024]:
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(1, dtype=B.dtype), dev)
freduce(a, b)
tvm.testing.assert_allclose(b.numpy()[0], np.sum(a.numpy()), 1e-4, 1e-4)
for target in ["cuda", "nvptx"]:
check_target(target)
if __name__ == "__main__":
test_prefetch()
test_if()
test_for()
test_cpu()
test_gpu()
test_while_vectorize()
test_while_collatz()
test_while_mandel()
test_while_binary_search()
test_dyn_shared()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_lower_match_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
def _check(original, transformed):
mod = tvm.IRModule.from_expr(original)
mod = tvm.tir.transform.LowerMatchBuffer()(mod)
mod = tvm.tir.transform.Simplify()(mod)
tvm.ir.assert_structural_equal(mod["main"], transformed)
def _check_fail(original):
mod = tvm.IRModule.from_expr(original)
with pytest.raises(tvm.TVMError):
mod = tvm.tir.transform.LowerMatchBuffer()(mod)
@T.prim_func
def buffer_load_store(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16, 16))
C = T.match_buffer(c, (16, 16))
for i, j, k in T.grid(4, 16, 8):
with T.block():
T.reads(C[i * 4 : i * 4 + 4, k * 2 : k * 2 + 2])
T.writes(A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2])
sub_A = T.match_buffer(
A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2], (4, 1, 2), offset_factor=1
)
sub_C = T.match_buffer(C[i * 4 : i * 4 + 4, k * 2 : k * 2 + 2], (4, 2), offset_factor=1)
for ii, kk in T.grid(4, 2):
sub_A[ii, 0, kk] += sub_C[ii, kk]
@T.prim_func
def transformed_buffer_load_store(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16, 16))
C = T.match_buffer(c, (16, 16))
for i, j, k in T.grid(4, 16, 8):
with T.block():
T.reads(C[i * 4 : i * 4 + 4, k * 2 : k * 2 + 2])
T.writes(A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2])
for ii, kk in T.grid(4, 2):
A[i * 4 + ii, j, k * 2 + kk] += C[i * 4 + ii, k * 2 + kk]
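# LowerMatchBuffer eliminates the sub_A / sub_C bindings by substituting the
# source-buffer indices directly: sub_A[ii, 0, kk] becomes
# A[i * 4 + ii, j, k * 2 + kk], as spelled out in the transformed function.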
@tvm.ir.register_op_attr("tir.intrin_test", "")
def intrin_test(data, elem_offset, stride_0, stride_1, shape_0, shape_1):
return 0
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (32, 64, 128))
B = T.match_buffer(b, (64, 64, 64))
for i, j, k in T.grid(2, 64, 8):
with T.block():
T.reads([])
T.writes(A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16])
sub_A = T.match_buffer(
A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16],
(16, 1, 16),
strides=[8192, 128, 1],
offset_factor=1,
)
T.evaluate(
intrin_test(
sub_A.data,
sub_A.elem_offset,
sub_A.strides[0],
sub_A.strides[1],
sub_A.shape[0],
sub_A.shape[1],
)
)
for i, j, k in T.grid(64, 2, 8):
with T.block():
Bs_0 = T.var("int32")
Bs_1 = T.var("int32")
T.reads([])
T.writes(B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8])
sub_B = T.match_buffer(
B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8],
(32, 8),
strides=[Bs_0, Bs_1],
offset_factor=1,
)
T.evaluate(
intrin_test(
sub_B.data,
sub_B.elem_offset,
sub_B.strides[0],
sub_B.strides[1],
sub_B.shape[0],
sub_B.shape[1],
)
)
@T.prim_func
def transformed_opaque_access(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (32, 64, 128))
B = T.match_buffer(b, (64, 64, 64))
for i, j, k in T.grid(2, 64, 8):
with T.block():
T.reads([])
T.writes(A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16])
T.evaluate(
intrin_test(
A.data,
i * 131072 + j * 128 + k * 16,
8192,
128,
16,
1,
)
)
for i, j, k in T.grid(64, 2, 8):
with T.block():
T.reads([])
T.writes(B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8])
T.evaluate(
intrin_test(
B.data,
i * 4096 + j * 2048 + k * 8,
64,
1,
32,
8,
)
)
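# The constant elem_offset values follow from the source strides: for sub_A
# with strides [8192, 128, 1], the offset is
# (i * 16) * 8192 + j * 128 + (k * 16) * 1 = i * 131072 + j * 128 + k * 16,
# matching the first intrin_test call above.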
@T.prim_func
def high_dim_opaque_access(a: T.handle) -> None:
A = T.match_buffer(a, (16, 32, 64))
for i, j, k in T.grid(16, 2, 4):
with T.block():
As_0 = T.var("int32")
As_1 = T.var("int32")
T.reads([])
T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16])
sub_A = T.match_buffer(
A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
(16, 16),
strides=[As_0, As_1],
offset_factor=1,
)
T.evaluate(
intrin_test(
sub_A.data,
sub_A.elem_offset,
sub_A.strides[0],
sub_A.strides[1],
sub_A.shape[0],
sub_A.shape[1],
)
)
@T.prim_func
def transformed_high_dim_opaque_access(a: T.handle) -> None:
A = T.match_buffer(a, (16, 32, 64))
for i, j, k in T.grid(16, 2, 4):
with T.block():
T.reads([])
T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16])
T.evaluate(
intrin_test(
A.data,
i * 2048 + j * 1024 + k * 16,
64,
1,
16,
16,
)
)
@T.prim_func
def high_dim_opaque_access_with_source_strides(a: T.handle) -> None:
A = T.match_buffer(a, (16, 32, 64), strides=[2576, 80, 1])
for i, j, k in T.grid(16, 2, 4):
with T.block():
As_0 = T.var("int32")
As_1 = T.var("int32")
T.reads([])
T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16])
sub_A = T.match_buffer(
A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
(16, 16),
strides=[As_0, As_1],
offset_factor=1,
)
T.evaluate(
intrin_test(
sub_A.data,
sub_A.elem_offset,
sub_A.strides[0],
sub_A.strides[1],
sub_A.shape[0],
sub_A.shape[1],
)
)
@T.prim_func
def transformed_high_dim_opaque_access_with_source_strides(a: T.handle) -> None:
A = T.match_buffer(a, (16, 32, 64), strides=[2576, 80, 1])
for i, j, k in T.grid(16, 2, 4):
with T.block():
T.reads([])
T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16])
T.evaluate(
intrin_test(
A.data,
i * 2576 + j * 1280 + k * 16,
80,
1,
16,
16,
)
)
@T.prim_func
def recursive_match(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (64, 64, 64))
B = T.match_buffer(b, (64, 64, 64))
for i, j, k in T.grid(64, 4, 4):
with T.block():
T.reads([])
T.writes(
[
A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
B[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
]
)
As_0 = T.var("int32")
As_1 = T.var("int32")
sub_A = T.match_buffer(
A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
(16, 16),
strides=[As_0, As_1],
offset_factor=1,
)
sub_B = T.match_buffer(
B[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
(16, 16),
offset_factor=1,
)
for jj, kk in T.grid(4, 4):
with T.block():
T.reads([])
T.writes(
[
sub_A[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4],
sub_B[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4],
]
)
Ass_0 = T.var("int32")
Ass_1 = T.var("int32")
sub_sub_A = T.match_buffer(
sub_A[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4],
(4, 4),
strides=[Ass_0, Ass_1],
offset_factor=1,
)
sub_sub_B = T.match_buffer(
sub_B[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4],
(4, 4),
offset_factor=1,
)
T.evaluate(
intrin_test(
sub_sub_A.data,
sub_sub_A.elem_offset,
sub_sub_A.strides[0],
sub_sub_A.strides[1],
sub_sub_A.shape[0],
sub_sub_A.shape[1],
)
)
for jjj, kkk in T.grid(4, 4):
sub_sub_B[jjj, kkk] = 1
@T.prim_func
def transformed_recursive_match(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (64, 64, 64))
B = T.match_buffer(b, (64, 64, 64))
for i, j, k in T.grid(64, 4, 4):
with T.block():
T.reads([])
T.writes(
[
A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
B[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16],
]
)
for jj, kk in T.grid(4, 4):
with T.block():
T.reads([])
T.writes(
[
A[
i,
j * 16 + jj * 4 : j * 16 + jj * 4 + 4,
k * 16 + kk * 4 : k * 16 + kk * 4 + 4,
],
B[
i,
j * 16 + jj * 4 : j * 16 + jj * 4 + 4,
k * 16 + kk * 4 : k * 16 + kk * 4 + 4,
],
]
)
T.evaluate(
intrin_test(
A.data,
i * 4096 + j * 1024 + jj * 256 + k * 16 + kk * 4,
64,
1,
4,
4,
)
)
for jjj, kkk in T.grid(4, 4):
B[i, j * 16 + jj * 4 + jjj, k * 16 + kk * 4 + kkk] = 1
@T.prim_func
def symbolic_match(a: T.handle, b: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n * m, m))
B = T.match_buffer(b, (n * 2, m * 4))
for i in range(0, n):
with T.block():
T.reads([])
T.writes([A[i * m : i * m + n, 0:m], B[i * n : i * n + 2, 0 : m * 4]])
Bs_0 = T.var("int32")
Bs_1 = T.var("int32")
sub_A = T.match_buffer(A[i * m : i * m + m, 0:m], (m, m), offset_factor=1)
sub_B = T.match_buffer(
B[i * n : i * n + 2, 0 : m * 4], (2, m * 4), strides=[Bs_0, Bs_1], offset_factor=1
)
for ii, jj in T.grid(m, m):
sub_A[ii, jj] = 1
for j in range(0, 4):
T.evaluate(
intrin_test(
sub_B.data,
sub_B.elem_offset,
sub_B.strides[0],
sub_B.strides[1],
sub_B.shape[0],
sub_B.shape[1],
)
)
@T.prim_func
def transformed_symbolic_match(a: T.handle, b: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n * m, m))
B = T.match_buffer(b, (n * 2, m * 4))
for i in range(0, n):
with T.block():
T.reads([])
T.writes([A[i * m : i * m + n, 0:m], B[i * n : i * n + 2, 0 : m * 4]])
for ii, jj in T.grid(m, m):
A[i * m + ii, jj] = 1
for j in range(0, 4):
T.evaluate(
intrin_test(
B.data,
i * n * (m * 4),
m * 4,
1,
2,
m * 4,
)
)
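# With B contiguous of shape (n * 2, m * 4), the sub-buffer starting at row
# i * n has elem_offset i * n * (m * 4) and strides [m * 4, 1], which is what
# the lowered intrin_test call above encodes symbolically.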
@T.prim_func
def rank0_buffer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
B = T.match_buffer(b, (8, 8))
for i, j in T.grid(8, 8):
with T.block():
T.reads([])
T.writes([A[i, j], B[i, j]])
sub_A = T.match_buffer(A[i, j], (), offset_factor=1)
sub_B = T.match_buffer(B[i, j], (), offset_factor=1)
sub_A[()] = 1
T.evaluate(
intrin_test(
sub_B.data,
sub_B.elem_offset,
0,
0,
0,
0,
)
)
@T.prim_func
def transformed_rank0_buffer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
B = T.match_buffer(b, (8, 8))
for i, j in T.grid(8, 8):
with T.block():
T.reads([])
T.writes([A[i, j], B[i, j]])
A[i, j] = 1
T.evaluate(
intrin_test(
B.data,
i * 8 + j,
0,
0,
0,
0,
)
)
@T.prim_func
def fail_match_load(a: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
for i, j in T.grid(8, 8):
with T.block():
T.reads(A[i, j])
T.writes([])
sub_A = T.match_buffer(A[i, j], (), elem_offset=0)
T.evaluate(sub_A[()])
@T.prim_func
def fail_match_store(a: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
for i, j in T.grid(8, 8):
with T.block():
T.reads([])
T.writes(A[i, j])
sub_A = T.match_buffer(A[i, j], (), elem_offset=0)
sub_A[()] = 1
@T.prim_func
def fail_buffer_bind(a: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
for i, j in T.grid(8, 2):
with T.block():
stride = T.var("int32")
sub_A = T.match_buffer(
A[i, j * 4 : j * 4 + 4], (1, 4), strides=[stride, stride], offset_factor=1
)
for jj in range(0, 4):
sub_A[i, j * 4 + jj] = 1
@T.prim_func
def fail_match_func_param(a: T.handle, m: T.handle, n: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
for i, j in T.grid(8, 2):
with T.block():
sub_A = T.match_buffer(A[i, j * 4 : j * 4 + 4], (1, 4), strides=[m, n], offset_factor=1)
for jj in range(0, 4):
sub_A[i, j * 4 + jj] = 1
def test_buffer_load_store():
_check(buffer_load_store, transformed_buffer_load_store)
def test_opaque_access():
_check(opaque_access, transformed_opaque_access)
def test_high_dim_opaque_access():
_check(high_dim_opaque_access, transformed_high_dim_opaque_access)
_check(
high_dim_opaque_access_with_source_strides,
transformed_high_dim_opaque_access_with_source_strides,
)
def test_recursive_match():
_check(recursive_match, transformed_recursive_match)
def test_symbolic_match():
_check(symbolic_match, transformed_symbolic_match)
def test_rank0_buffer():
_check(rank0_buffer, transformed_rank0_buffer)
def test_fail_load_store():
_check_fail(fail_match_load)
_check_fail(fail_match_store)
def test_fail_buffer_bind():
_check_fail(fail_buffer_bind)
def test_fail_match_func_param():
_check_fail(fail_match_func_param)
if __name__ == "__main__":
test_buffer_load_store()
test_opaque_access()
test_high_dim_opaque_access()
test_recursive_match()
test_symbolic_match()
test_rank0_buffer()
test_fail_load_store()
test_fail_buffer_bind()
test_fail_match_func_param()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_nodes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te, ir
import numpy as np
def test_const():
x = tvm.tir.const(1, "int32")
assert x.dtype == "int32"
assert isinstance(x, tvm.tir.IntImm)
def test_te_const():
x = tvm.te.const(1, "int32")
assert x.dtype == "int32"
assert isinstance(x, tvm.tir.IntImm)
def test_scalar_dtype_inference():
for data in [
True,
bool(1),
np.uint8(1),
np.uint16(1),
np.uint32(1),
np.uint64(1),
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
np.float16(1),
np.float32(1),
np.float64(1),
]:
assert tvm.tir.const(data).dtype == str(np.array(data).dtype)
assert tvm.tir.const(1).dtype == "int32"
assert tvm.tir.const(1.0).dtype == "float32"
for data in [
True,
bool(1),
np.uint8(1),
np.uint16(1),
np.uint32(1),
np.uint64(1),
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
np.float16(1),
np.float32(1),
np.float64(1),
]:
assert tvm.runtime.convert(data).dtype == str(np.array(data).dtype)
assert tvm.runtime.convert(1).dtype == "int32"
assert tvm.runtime.convert(1.0).dtype == "float32"
def test_make():
x = tvm.tir.const(1, "int32")
y = te.var("x")
z = x + y
assert isinstance(tvm.tir.max(x, y), tvm.tir.Max)
assert isinstance(tvm.tir.min(x, y), tvm.tir.Min)
def test_ir():
x = tvm.tir.const(1, "int32")
y = tvm.tir.IntImm("int32", 1)
z = x + y
stmt = tvm.tir.Evaluate(z)
assert isinstance(stmt, tvm.tir.Evaluate)
def test_ir2():
buf_size = te.var("size")
x = te.var("n")
storage_type = ir.PrimType("int32")
handle_type = ir.PointerType(storage_type)
array = te.var("array", handle_type)
buf = tvm.tir.decl_buffer([buf_size], "int32", data=array)
st = tvm.tir.BufferStore(buf, x + 1, [1])
assert isinstance(st, tvm.tir.BufferStore)
assert st.buffer == buf
assert st.buffer.data == array
def test_let():
x = te.var("x")
y = te.var("y")
stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
def test_cast():
x = te.var("x", dtype="float32")
y = x.astype("int32")
z = x.astype("float32x4")
assert isinstance(y, tvm.tir.Cast)
assert isinstance(z, tvm.tir.Broadcast)
assert z.lanes == 4
s = tvm.tir.StringImm("s")
with pytest.raises(tvm.error.TVMError) as cm:
s.astype("int")
assert "Can't cast a handle to other types" in str(cm.execption)
def test_attr():
x = te.var("x")
y = te.var("y")
stmt = tvm.tir.AttrStmt(y, "stride", 10, tvm.tir.Evaluate(x + 1))
assert stmt.node == y
a = tvm.runtime.convert(1)
assert a.value == 1
try:
a.no_field
assert False
except AttributeError:
pass
def test_basic():
a = te.var("a")
b = te.var("b")
c = a + b
assert str(c) == "(%s: int32 + %s: int32)" % (a.name, b.name)
def test_stmt():
x = tvm.tir.Evaluate(0)
tvm.tir.For(te.var("i"), 0, 1, tvm.tir.ForKind.SERIAL, x)
def test_dir():
x = te.var("x")
dir(x)
def test_dtype():
x = te.var("x")
assert x.dtype == "int32"
y = te.var("y")
assert (x > y).dtype == "bool"
def test_any():
x = te.var("x")
y = te.var("y")
z = te.var("z")
try:
t = x or x
assert False
except ValueError:
pass
try:
tvm.tir.any()
assert False
except ValueError:
pass
assert str(tvm.tir.any(x < y)) == "(%s: int32 < %s: int32)" % (x.name, y.name)
assert str(tvm.tir.any(x < y, x > z)) == "((%s: int32 < %s: int32) || (%s > %s: int32))" % (
x.name,
y.name,
x.name,
z.name,
)
assert str(
tvm.tir.any(x < y, y > z + 1, x < z * 2)
) == "(((%s: int32 < %s: int32) || (%s > (%s: int32 + 1))) || (%s < (%s*2)))" % (
x.name,
y.name,
y.name,
z.name,
x.name,
z.name,
)
def test_all():
x = te.var("x")
y = te.var("y")
z = te.var("z")
try:
t = x and x
assert False
except ValueError:
pass
try:
tvm.tir.all()
assert False
except ValueError:
pass
assert str(tvm.tir.all(x < y)) == "(%s: int32 < %s: int32)" % (x.name, y.name)
assert str(tvm.tir.all(x < y, x > z)) == "((%s: int32 < %s: int32) && (%s > %s: int32))" % (
x.name,
y.name,
x.name,
z.name,
)
assert str(
tvm.tir.all(x < y, y > z + 1, x < z * 2)
) == "(((%s: int32 < %s: int32) && (%s > (%s: int32 + 1))) && (%s < (%s*2)))" % (
x.name,
y.name,
y.name,
z.name,
x.name,
z.name,
)
def test_bitwise():
x = te.var("x")
y = te.var("y")
assert str(x << y) == "@tir.shift_left(x: int32, y: int32, dtype=int32)"
assert str(x >> y) == "@tir.shift_right(x: int32, y: int32, dtype=int32)"
assert str(x & y) == "@tir.bitwise_and(x: int32, y: int32, dtype=int32)"
assert str(x | y) == "@tir.bitwise_or(x: int32, y: int32, dtype=int32)"
assert str(x ^ y) == "@tir.bitwise_xor(x: int32, y: int32, dtype=int32)"
assert str(10 & x) == "@tir.bitwise_and(10, x: int32, dtype=int32)"
assert str(10 | x) == "@tir.bitwise_or(10, x: int32, dtype=int32)"
assert str(10 ^ x) == "@tir.bitwise_xor(10, x: int32, dtype=int32)"
assert str(10 >> x) == "@tir.shift_right(10, x: int32, dtype=int32)"
assert str(10 << x) == "@tir.shift_left(10, x: int32, dtype=int32)"
assert str(10 % x) == "floormod(10, x: int32)"
assert str(~x) == "@tir.bitwise_not(x: int32, dtype=int32)"
assert (tvm.tir.const(1, "int8x2") >> 1).dtype == "int8x2"
assert (x >> tvm.tir.const(1, "int32x2")).dtype == "int32x2"
assert (te.var("z", "int8x2") << tvm.tir.const(1, "int8x2")).dtype == "int8x2"
def test_float_bitwise():
t = tvm.tir.const(1.5, dtype="float32")
for test in [
lambda lhs, rhs: lhs << rhs,
lambda lhs, rhs: lhs >> rhs,
lambda lhs, rhs: lhs | rhs,
lambda lhs, rhs: lhs ^ rhs,
lambda lhs, rhs: lhs & rhs,
]:
try:
test(t, 10.0)
assert False
except tvm.TVMError:
pass
try:
~t
assert False
except RuntimeError:
pass
def test_shift_bounds():
x = te.var("x")
for test in [lambda lhs, rhs: lhs << rhs, lambda lhs, rhs: lhs >> rhs]:
# negative case
for testcase in [(x, -1), (x, 32)]:
try:
test(*testcase)
assert False
except tvm.TVMError:
pass
# positive case
for testcase in [(x, 0), (x, 16), (x, 31)]:
test(*testcase)
def test_divide_by_zero():
for test in [
lambda lhs, rhs: tvm.tir.floormod(lhs, rhs),
lambda lhs, rhs: tvm.tir.floordiv(lhs, rhs),
lambda lhs, rhs: tvm.tir.truncmod(lhs, rhs),
lambda lhs, rhs: tvm.tir.truncdiv(lhs, rhs),
lambda lhs, rhs: tvm.tir.div(lhs, rhs),
]:
try:
test(tvm.tir.const(5, "int32"), tvm.tir.const(0, "int32"))
assert False
except tvm.TVMError:
pass
def test_infinity():
assert str(tvm.tir.infinity("float16")) == "inff16"
assert str(tvm.tir.infinity("float32")) == "inff32"
assert str(tvm.tir.infinity("float64")) == "inff64"
def test_isnan():
x = te.var("x", "float32")
assert str(tvm.tir.isnan(x)) == "@tir.isnan(x: float32, dtype=bool)"
assert str(tvm.tir.isnan(x).dtype) == "bool"
y = te.var("y", "float16")
assert str(tvm.tir.isnan(y)) == "@tir.isnan(cast(float32, y: float16), dtype=bool)"
z = te.var("z", "int32")
assert str(tvm.tir.isnan(z)) == "False"
k = te.var("k", "int8x2")
assert str(tvm.tir.isnan(k).dtype) == "uint1x2"
def test_equality():
a = te.var("a")
b = te.var("b")
c = a == b
assert not c
d = c != c
assert not d
def test_equality_string_imm():
x = "a"
y = tvm.tir.StringImm(x)
assert x == y.value
assert x == y
def test_prim_func():
x = te.var("x")
y = te.var("y")
b = tvm.tir.decl_buffer((x,), "float32")
stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
func = tvm.tir.PrimFunc([x, y, b], stmt)
# make sure we can print
func.astext()
assert func.buffer_map[func.params[2]].same_as(b)
assert len(func.buffer_map) == 1
f2 = func.with_attr({"calling_conv": 1, "tir.noalias": True})
assert f2.attrs["calling_conv"].value == 1
assert func.attrs is None
def test_vars():
x = tvm.tir.Var("xyz", "int8")
assert x.dtype == "int8"
ptype = tvm.ir.PointerType(tvm.ir.PrimType("float"))
x = tvm.tir.Var("xyz", ptype)
assert x.dtype == "handle"
assert x.type_annotation == ptype
assert isinstance(ptype.element_type, tvm.ir.PrimType)
def test_scoped_storage_vars():
dtype = "float"
storage_scope = "global.texture"
ptype = tvm.ir.PointerType(tvm.ir.PrimType(dtype), storage_scope)
x = tvm.tir.Var("xyz", ptype)
assert x.dtype == "handle"
assert x.type_annotation == ptype
assert x.type_annotation.storage_scope == storage_scope
assert isinstance(ptype.element_type, tvm.ir.PrimType)
def test_buffer_load_store():
b = tvm.tir.decl_buffer((10,), "float32")
x = tvm.tir.BufferLoad(b, [0])
assert isinstance(x, tvm.tir.BufferLoad)
assert x.dtype == "float32"
assert x.buffer == b
s = tvm.tir.BufferStore(b, 0.1, [0])
assert isinstance(s, tvm.tir.BufferStore)
s = tvm.tir.BufferRealize(b, [tvm.ir.Range(0, 1)], True, tvm.tir.Evaluate(0))
assert isinstance(s, tvm.tir.BufferRealize)
def test_intimm_cond():
x = tvm.runtime.convert(1)
y = tvm.runtime.convert(1)
s = {x}
assert y in s
assert x == y
assert x < 20
assert not (x >= 20)
assert x < 10 and y < 10
assert not tvm.runtime.convert(x != 1)
assert x == 1
def test_block_blockrealize():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
vx = tvm.tir.IterVar((16, 16), "vx", 0)
vx_var = vx.var
vy = tvm.tir.IterVar((16, 16), "vy", 2)
vy_var = vy.var
A = tvm.tir.decl_buffer((16), "float32")
B = tvm.tir.decl_buffer((16, 16), "float32")
alloc_buffer = tvm.tir.decl_buffer((16, 16), "float32")
match_buffer = tvm.tir.decl_buffer((16, 16), "float32")
init_body = tvm.tir.BufferStore(A, 0.0, [vx_var])
body = tvm.tir.BufferStore(
A,
tvm.tir.BufferLoad(A, [vx_var]) + tvm.tir.BufferLoad(B, [vx_var, vy_var]),
[vx_var],
)
reads = [
tvm.tir.BufferRegion(
B, [tvm.ir.Range.from_min_extent(vx_var, 1), tvm.ir.Range.from_min_extent(vy_var, 1)]
)
]
writes = [tvm.tir.BufferRegion(A, [tvm.ir.Range.from_min_extent(vx_var, 1)])]
block_match_buffer = tvm.tir.MatchBufferRegion(
match_buffer, tvm.tir.BufferRegion(B, [tvm.ir.Range(0, 16), tvm.ir.Range(0, 16)])
)
block = tvm.tir.Block(
[vx, vy],
reads,
writes,
"block",
body,
init=init_body,
alloc_buffers=[alloc_buffer],
match_buffers=[block_match_buffer],
annotations={"attr_key": "attr_value"},
)
# Checking Block
assert isinstance(block, tvm.tir.Block)
# Checking iter_vars
assert block.iter_vars[0] == vx
assert block.iter_vars[1] == vy
# Checking reads/writes region
assert isinstance(block.reads[0], tvm.tir.BufferRegion)
assert block.reads[0].buffer == B
assert block.reads[0].region[0].min == vx_var
assert block.reads[0].region[1].min == vy_var
assert isinstance(block.writes[0], tvm.tir.BufferRegion)
assert block.writes[0].buffer == A
assert block.writes[0].region[0].min == vx_var
assert block.writes[0].region[0].extent == 1
# Checking name_hint
assert block.name_hint == "block"
# Checking body
assert block.body == body
# Checking init
assert block.init == init_body
# Checking alloc_buffers
assert block.alloc_buffers[0] == alloc_buffer
# Checking match_buffers
assert block.match_buffers[0].buffer == match_buffer
assert isinstance(block.match_buffers[0].source, tvm.tir.BufferRegion)
assert block.match_buffers[0].source.buffer == B
assert block.match_buffers[0].source.region[0].min == 0
assert block.match_buffers[0].source.region[0].extent == 16
# Checking BlockRealize
block_realize = tvm.tir.BlockRealize([x, y], tvm.tir.const(True, "bool"), block)
assert isinstance(block_realize, tvm.tir.BlockRealize)
assert block_realize.iter_values[0] == x
assert block_realize.iter_values[1] == y
assert block_realize.predicate == tvm.tir.const(True, "bool")
assert block_realize.block == block
# make sure we can print using ReprPrinter
str(block)
str(block_realize)
# make sure we can print using TIRTextPrinter
func = tvm.tir.PrimFunc([], block_realize)
output = func.astext()
assert output.find("meta[tir.BlockRealise]") == -1
assert output.find("bind") != -1
assert output.find("reads") != -1
assert output.find("writes") != -1
assert output.find("alloc_buffer") != -1
assert output.find("match_buffer") != -1
assert output.find("attr") != -1
assert output.find("with init()") != -1
def test_tir_allocate():
dtype = "int8"
storage_scope = "global"
ptype = tvm.ir.PointerType(tvm.ir.PrimType(dtype), storage_scope)
a = te.var("buffer", ptype)
allocate = tvm.tir.Allocate(
buffer_var=a,
dtype=dtype,
extents=[2, 2],
condition=tvm.get_global_func("tir.const_true")(dtype, None),
body=tvm.tir.Evaluate(2 + 1),
annotations={
"attr1": "foo",
"attr2": "bar",
},
)
assert allocate.buffer_var == a
assert allocate.dtype == "int8"
assert list(allocate.extents) == [2, 2]
assert allocate.annotations["attr1"] == "foo"
assert allocate.annotations["attr2"] == "bar"
# make sure we can print using TIRTextPrinter
func = tvm.tir.PrimFunc([], allocate)
output = func.astext()
assert (
output.find(
'allocate(buffer: Pointer(global int8), int8, [2, 2]), storage_scope = global, annotations = {"attr2": "bar", "attr1": "foo"})'
)
!= -1
)
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_op_types.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
import tvm.testing
from tvm import tir
def test_tir_op_tvm_tuple():
x = tir.Var("x", dtype="float32")
y = tir.Var("y", dtype="float32")
z = tir.Var("z", dtype="float32")
expr = tir.tvm_tuple(x, y, z, 1, 2, 3)
assert expr.op.name == "tir.tvm_tuple"
def test_tir_op_tvm_struct_get():
x = tir.Var("x", dtype="handle")
expr = tir.tvm_struct_get(x, 1, 2, dtype="int32")
assert expr.op.name == "tir.tvm_struct_get"
def test_tir_op_tvm_struct_set():
x = tir.Var("x", dtype="handle")
expr = tir.tvm_struct_set(x, 1, 2, 3)
assert expr.op.name == "tir.tvm_struct_set"
def test_tir_op_address_of():
buffer = tir.decl_buffer((128), "float32")
expr = tir.address_of(buffer[0])
assert expr.op.name == "tir.address_of"
def test_tir_op_lookup_param():
expr = tir.lookup_param("p0")
assert expr.op.name == "tir.lookup_param"
def test_tir_op_reinterpret():
x = tir.Var("x", dtype="int32")
expr = tir.reinterpret("float32", x)
assert expr.op.name == "tir.reinterpret"
def test_tir_op_isnullptr():
x = tir.Var("x", dtype="int32")
expr = tir.isnullptr(x)
assert expr.op.name == "tir.isnullptr"
def test_tir_op_call_assume():
x = tir.Var("x", dtype="int32")
expr = tir.assume(cond=x)
assert expr.op.name == "tir.assume"
def test_tir_op_call_undef():
expr = tir.undef()
assert expr.op.name == "tir.undef"
def test_tir_op_call_likely():
x = tir.Var("x", dtype="int32")
expr = tir.likely(cond=x)
assert expr.op.name == "tir.likely"
def test_tir_op_tvm_thread_allreduce():
x = tir.Var("x", "int32")
buffer = tir.decl_buffer((128), "float32")
y = tir.Var("y", "handle")
z = tir.Var("z", "int32")
expr = tir.tvm_thread_allreduce(x, buffer[0], True, y, z)
assert expr.op.name == "tir.tvm_thread_allreduce"
def test_tir_op_type_annotation():
expr = tir.type_annotation("int32")
assert expr.op.name == "tir.type_annotation"
def test_tir_op_tvm_access_ptr():
buffer = tir.decl_buffer((128), "float32")
expr = tir.tvm_access_ptr("float32", buffer.data, 0, 1, 2)
assert expr.op.name == "tir.tvm_access_ptr"
def test_tir_op_tvm_throw_last_error():
expr = tir.tvm_throw_last_error()
assert expr.op.name == "tir.tvm_throw_last_error"
def test_tir_op_tvm_load_matrix_sync():
buffer = tir.decl_buffer((16, 16), "float32")
x = tir.Var("x", "handle")
expr = tir.tvm_load_matrix_sync(buffer.data, 16, 16, 16, 0, x, 128, "row_major")
assert expr.op.name == "tir.tvm_load_matrix_sync"
def test_tir_op_tvm_store_matrix_sync():
buffer = tir.decl_buffer((16, 16), "float32")
x = tir.Var("x", "handle")
expr = tir.tvm_store_matrix_sync(buffer.data, 16, 16, 16, 0, x, 128, "row_major")
assert expr.op.name == "tir.tvm_store_matrix_sync"
def test_tir_op_tvm_mma_sync():
buffer_0 = tir.decl_buffer((16, 16), "float32")
buffer_1 = tir.decl_buffer((16, 16), "float32")
buffer_2 = tir.decl_buffer((16, 16), "float32")
buffer_3 = tir.decl_buffer((16, 16), "float32")
expr = tir.tvm_mma_sync(buffer_0.data, 0, buffer_1.data, 0, buffer_2.data, 0, buffer_3.data, 0)
assert expr.op.name == "tir.tvm_mma_sync"
def test_tir_op_tvm_bmma_sync():
buffer_0 = tir.decl_buffer((16, 16), "float32")
buffer_1 = tir.decl_buffer((16, 16), "float32")
buffer_2 = tir.decl_buffer((16, 16), "float32")
buffer_3 = tir.decl_buffer((16, 16), "float32")
expr = tir.tvm_bmma_sync(buffer_0.data, 0, buffer_1.data, 0, buffer_2.data, 0, buffer_3.data, 0)
assert expr.op.name == "tir.tvm_bmma_sync"
def test_tir_op_tvm_fill_fragment():
buffer = tir.decl_buffer((16, 16), "float32")
expr = tir.tvm_fill_fragment(buffer.data, 16, 16, 16, 0, 0)
assert expr.op.name == "tir.tvm_fill_fragment"
def test_tir_op_ptx_mma():
buffer_a = tir.decl_buffer([32], "int4", scope="local")
buffer_b = tir.decl_buffer([16], "uint4", scope="local")
buffer_c = tir.decl_buffer([4], "int32", scope="local")
expr = tir.ptx_mma(
"int32",
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
buffer_a.data,
0,
buffer_b.data,
0,
buffer_c.data,
0,
False,
)
assert expr.op.name == "tir.ptx_mma"
def test_tir_op_ptx_mma_sp():
buffer_a = tir.decl_buffer([32], "int4", scope="local")
buffer_b = tir.decl_buffer([16], "uint4", scope="local")
buffer_c = tir.decl_buffer([4], "int32", scope="local")
buffer_d = tir.decl_buffer([1], "uint32", scope="local")
expr = tir.ptx_mma_sp(
"int32",
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
buffer_a.data,
0,
buffer_b.data,
0,
buffer_c.data,
0,
buffer_d.data,
0,
0,
False,
)
assert expr.op.name == "tir.ptx_mma_sp"
def test_tir_op_mma_store():
x = tir.Var("x", dtype="int32")
y = tir.Var("y", dtype="int32")
buffer_w = tir.decl_buffer([16, 8], dtype="int32", scope="warp", offset_factor=1)
buffer = tir.decl_buffer(
[16, 16], dtype="int32", scope="global", offset_factor=1, strides=[x, y]
)
expr = tir.mma_store(
"int32",
16,
16,
buffer.access_ptr("w"),
buffer_w.data,
buffer_w.elem_offset,
x,
)
assert expr.op.name == "tir.mma_store"
def test_tir_op_mma_fill():
buffer_w = tir.decl_buffer([16, 8], dtype="int32", scope="warp", offset_factor=1)
expr = tir.mma_fill("int32", 8, buffer_w.data, buffer_w.elem_offset)
assert expr.op.name == "tir.mma_fill"
def test_op_ptx_ldmatrix():
buffer_shared = tir.decl_buffer([16, 16], "float16", scope="shared")
buffer_local = tir.decl_buffer([8], "float16", scope="local")
expr = tir.ptx_ldmatrix(
"float16", False, 4, ".b16", buffer_local.data, 0, buffer_shared.data, 0
)
assert expr.op.name == "tir.ptx_ldmatrix"
def test_op_ptx_cp_async():
buffer_shared = tir.decl_buffer([16, 16], "float16", scope="shared")
buffer_local = tir.decl_buffer([8], "float16", scope="local")
expr = tir.ptx_cp_async("float16", buffer_shared.data, 0, buffer_local.data, 0, 16)
assert expr.op.name == "tir.ptx_cp_async"
def test_op_ptx_commit_group():
expr = tir.ptx_commit_group()
assert expr.op.name == "tir.ptx_commit_group"
def test_op_ptx_wait_group():
expr = tir.ptx_wait_group(8)
assert expr.op.name == "tir.ptx_wait_group"
def test_tir_op_vectorlow():
buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1)
vec = buffer.vload([0, 0], dtype="int8x16")
expr = tir.vectorlow("int8x8", vec)
assert expr.op.name == "tir.vectorlow"
def test_tir_op_vectorhigh():
buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1)
vec = buffer.vload([0, 0], dtype="int8x16")
expr = tir.vectorhigh("int8x8", vec)
assert expr.op.name == "tir.vectorhigh"
def test_tir_op_vectorcombine():
buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1)
vec = buffer.vload([0, 0], dtype="int8x16")
expr = tir.vectorcombine("int8x8", vec, vec)
assert expr.op.name == "tir.vectorcombine"
def test_tir_op_shift_left():
x = tir.Var("x", dtype="int32")
y = tir.Var("x", dtype="int32")
expr = tir.shift_left(x, y)
assert expr.op.name == "tir.shift_left"
def test_tir_op_shift_right():
x = tir.Var("x", dtype="int32")
y = tir.Var("x", dtype="int32")
expr = tir.shift_right(x, y)
assert expr.op.name == "tir.shift_right"
def test_tir_op_TVMBackendAllocWorkspace():
expr = tir.TVMBackendAllocWorkspace(0, 1, 2, 3, 4)
assert expr.op.name == "tir.TVMBackendAllocWorkspace"
def test_tir_op_TVMBackendFreeWorkspace():
buffer = tir.decl_buffer((128), "float32")
expr = tir.TVMBackendFreeWorkspace(0, 1, buffer.data)
assert expr.op.name == "tir.TVMBackendFreeWorkspace"
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def check_throws(f):
try:
f()
except tvm.error.TVMError:
pass
else:
raise AssertionError("Should have raised an exception but didn't.")
def test_const_fold():
def check(f, *args):
x = f(*[tvm.tir.const(x, "int32") for x in args])
y = f(*args)
if not isinstance(x, (tvm.tir.IntImm,)) or x.value != int(y):
raise ValueError("check error: %s vs %s " % (x, y))
tmod = tvm.tir.truncmod
check(lambda x, y: x + y, 3, 4)
check(lambda x, y: x * y, 3, 12)
check(lambda x, y: x * y - 10, 3, 12)
check(lambda x, y: x - tmod(y, 10), 3, 12)
check(lambda x, y: x // y + 10, 100, 12)
check(lambda x, y: x & y + 10, 112, 128)
check(lambda x, y: x > y, 112, 128)
check(lambda x, y: x < y, 112, 128)
check(lambda x, y: x <= y, 112, 128)
check(lambda x, y: x >= y, 112, 128)
check(lambda x, y: (x | y) ^ 10, 112, 128)
def test_const_fold2():
x = te.var("x")
tmod = tvm.tir.truncmod
tdiv = tvm.tir.truncdiv
assert (x + 0).same_as(x)
assert (0 + x).same_as(x)
assert (x - 0).same_as(x)
assert tmod(x, 1).value == 0
assert (x * 1).same_as(x)
assert (1 * x).same_as(x)
assert isinstance(tdiv(1, x), tvm.tir.Div)
def test_const_fold3():
# Test that using ints with logic operations is forbidden
x = te.var("x")
for val in [0, 1]:
for func in [tvm.tir.all, tvm.tir.any]:
check_throws(lambda: func(tvm.tir.const(val, "uint1"), x))
check_throws(lambda: func(x, tvm.tir.const(val, "uint1")))
# Test const folding when both arguments are const
for tvm_func, py_func in [
(tvm.tir.all, lambda a, b: a and b),
(tvm.tir.any, lambda a, b: a or b),
]:
for v1 in [0, 1]:
for v2 in [0, 1]:
assert tvm.ir.structural_equal(
tvm_func(tvm.tir.const(v1, "uint1"), tvm.tir.const(v2, "uint1")),
tvm.tir.const(py_func(v1, v2), "uint1"),
)
x = te.var("x", "uint1")
true = tvm.tir.const(1, "uint1")
false = tvm.tir.const(0, "uint1")
assert tvm.tir.all(x, true).same_as(x)
assert tvm.tir.all(true, x).same_as(x)
assert tvm.tir.any(x, false).same_as(x)
assert tvm.tir.any(false, x).same_as(x)
assert tvm.tir.all(x, false).same_as(false)
assert tvm.tir.all(false, x).same_as(false)
assert tvm.tir.any(x, true).same_as(true)
assert tvm.tir.any(true, x).same_as(true)
def test_const_fold4():
x1 = tvm.tir.const(4, "int32")
x2 = x1 + 5
tdiv = tvm.tir.truncdiv
assert isinstance(x2, tvm.tir.IntImm) and x2.value == 9
x3 = tdiv(x2, 3)
assert isinstance(x3, tvm.tir.IntImm) and x3.value == 3
x4 = x3 + 0.55
assert isinstance(x4, tvm.tir.FloatImm) and abs(x4.value - 3.55) < 1e-6
x5 = te.ceil(x4)
assert isinstance(x5, tvm.tir.FloatImm) and x5.value == 4
x6 = x5.astype("int")
assert isinstance(x6, tvm.tir.IntImm) and x6.value == 4, "x6={}".format(x6)
y = (te.round((tvm.tir.const(6.5, "float32") - 1) / 1.5) + 2).astype("int")
assert isinstance(y, tvm.tir.IntImm) and y.value == 6
def test_binary_dtype_match():
def verify_general_dtype_support(f, is_conditional=False):
rules = [
[("bool", "int32"), "int32"],
[("int32", "float32"), "float32"],
[("int32", "int64"), "int64"],
[("uint32", "int8"), "uint32"],
[("uint32", "int32"), "uint32"],
]
for (lhs_dtype, rhs_dtype), out_dtype in rules:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
out = f(lhs, rhs)
if not is_conditional:
assert out.dtype == out_dtype
else:
assert out.dtype == "bool"
if hasattr(out, "a"):
assert out.a.dtype == out_dtype
assert out.b.dtype == out_dtype
elif hasattr(out, "args"):
# CallOp
assert out.args[0].dtype == out_dtype
assert out.args[1].dtype == out_dtype
else:
raise ValueError("Unknown binary op format!")
def verify_callop_float_only(f):
for lhs_dtype in ["int32", "float32", "float64"]:
for rhs_dtype in ["int32", "float32", "float64"]:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if "float" not in lhs_dtype and "float" not in rhs_dtype:
check_throws(lambda: f(lhs, rhs))
elif "float" in lhs_dtype:
out = f(lhs, rhs)
# Upcasting for floating point types
dtypes = [lhs_dtype, rhs_dtype]
if "float64" in dtypes:
target_dtype = "float64"
elif "float32" in dtypes:
target_dtype = "float32"
else:
target_dtype = "int32"
assert out.dtype == target_dtype
# Final inputs are the right type
assert out.args[0].dtype == target_dtype
assert out.args[1].dtype == target_dtype
else:
out = f(lhs, rhs)
assert out.dtype == rhs_dtype
assert out.args[0].dtype == rhs_dtype
assert out.args[1].dtype == rhs_dtype
verify_general_dtype_support(lambda a, b: a + b)
verify_general_dtype_support(lambda a, b: a * b)
verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True)
verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True)
verify_callop_float_only(lambda a, b: te.power(a, b))
# verify bool & int32 constant folding
assert tvm.tir.const(1) == tvm.tir.const(True)
assert tvm.tir.const(2) != tvm.tir.const(True)
def test_if_then_else():
cases = [
[(te.var("cond", dtype="bool"), "bool", "int32"), "int32"],
[(True, "int32", "float32"), "float32"],
[(False, "int32", "int64"), "int64"],
[(te.var("cond", dtype="bool"), "uint32", "int32"), "uint32"],
[(te.var("cond", dtype="int32"), "uint32", "int32"), "uint32"],
]
for (cond, lhs_dtype, rhs_dtype), out_dtype in cases:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if cond is True or cond is False:
out = tvm.tir.if_then_else(cond, lhs, rhs)
out2 = tvm.tir.if_then_else(not cond, rhs, lhs)
out3 = tvm.tir.if_then_else(not cond, lhs, rhs)
assert tvm.ir.structural_equal(out, out2) == 1
if cond:
assert tvm.ir.structural_equal(out, lhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, rhs.astype(out_dtype)) == 1
else:
assert tvm.ir.structural_equal(out, rhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, lhs.astype(out_dtype)) == 1
elif cond.dtype == "bool":
out = tvm.tir.if_then_else(cond, lhs, rhs)
assert out.dtype == out_dtype
assert out.args[1].dtype == out_dtype
assert out.args[2].dtype == out_dtype
elif cond.dtype != "bool":
check_throws(lambda: tvm.tir.if_then_else(cond, lhs, rhs))
else:
raise ValueError("Unknown combinations")
if __name__ == "__main__":
test_const_fold()
test_const_fold2()
test_const_fold3()
test_const_fold4()
test_binary_dtype_match()
test_if_then_else()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ptx_cp_async.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_cp_async(A: T.Buffer[(32, 128), "float16"], B: T.Buffer[(32, 128), "float16"]) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([32, 128], "float16", scope="shared")
T.reads(A[0:32, 0:128])
T.writes(B[0:32, 0:128])
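        # Each thread issues 16 asynchronous copies of 8 fp16 elements; the
        # last argument of ptx_cp_async is the transfer size in bytes
        # (8 elements * 2 bytes = 16).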
for i in range(16):
T.evaluate(
T.ptx_cp_async(
A_shared.data, tx * 128 + 8 * i, A.data, tx * 128 + 8 * i, 16, dtype="float16"
)
)
# TODO(masahi): Remove dtype requirement from TVMScript parser
T.evaluate(T.ptx_commit_group(dtype=""))
T.evaluate(T.ptx_wait_group(0, dtype=""))
for i in range(128):
B[tx, i] = A_shared[tx, i]
@tvm.testing.requires_cuda_compute_version(8)
def test_ptx_cp_async():
f = ptx_cp_async
mod = tvm.build(f, target="cuda")
A_np = np.random.rand(32, 128).astype("float16")
B_np = np.zeros((32, 128)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_np)
if __name__ == "__main__":
test_ptx_cp_async()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ptx_ldmatrix.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
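        # Each thread passes the shared-memory address of one 8-element row
        # fragment: offset 16 * (tx % 16) + 8 * (tx // 16) points at
        # A_shared[tx % 16, 8 * (tx // 16)].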
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else: # num == 4
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ptx_mma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def gemm_mma_m8n8k4_row_col_fp64fp64fp64(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 4], dtype="float64")
B = T.match_buffer(b, [8, 4], dtype="float64")
C = T.match_buffer(c, [8, 8], dtype="float64")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([1], "float64", scope="local")
MultiB = T.decl_buffer([1], "float64", scope="local")
Accum = T.decl_buffer([2], "float64", scope="local")
for i in range(2):
Accum[i] = T.float64(0)
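    # m8n8k4 fp64 fragment layout: thread tx owns the single A and B element
    # at [tx // 4, tx % 4] and two adjacent C elements in row tx // 4.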
MultiA[0] = A[(tx % 32) // 4, (tx % 32) % 4]
MultiB[0] = B[(tx % 32) // 4, (tx % 32) % 4]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"col",
"fp64",
"fp64",
"fp64",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float64",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m8n8k4_row_col_fp64fp64fp64():
    sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_col_fp64fp64fp64)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [8, 4]).astype("float64")
B_np = np.random.uniform(-1, 1, [8, 4]).astype("float64")
C_np = np.zeros([8, 8]).astype("float64")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float64"), B_np.astype("float64").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k4_row_row_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 4], dtype="float16")
B = T.match_buffer(b, [4, 16], dtype="float16")
C = T.match_buffer(c, [16, 16], dtype="float16")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([8], "float16", scope="local")
for i in range(8):
        Accum[i] = T.float16(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)),
mma_multi_a_col,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) % 4,
mma_multi_b_col + (4 * ((tx % 32) // 8)),
]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"row",
"fp16",
"fp16",
"fp16",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float16",
)
)
for mma_accum_c_id in range(8):
C[
((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)),
mma_accum_c_id % 4 + (4 * ((tx % 32) % 16 // 8)) + mma_accum_c_id // 4 * 8,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(7)
def test_gemm_mma_m8n8k4_row_row_fp16fp16fp16():
sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp16)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16")
B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16")
C_np = np.zeros([16, 16]).astype("float16")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float16"), B_np.astype("float16"))
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k4_row_row_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 4], dtype="float16")
B = T.match_buffer(b, [4, 16], dtype="float16")
C = T.match_buffer(c, [16, 16], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([8], "float32", scope="local")
for i in range(8):
Accum[i] = T.float32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)),
mma_multi_a_col,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) % 4,
mma_multi_b_col + (4 * ((tx % 32) // 8)),
]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"row",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(8):
C[
((tx % 32) % 2)
+ ((mma_accum_c_id // 2 % 2) * 2)
+ 4 * ((tx % 32) // 16)
+ ((tx % 32) % 16 // 4) % 2 * 8,
(tx % 32) % 4 // 2 * 2
+ (tx % 32) % 16 // 8 * 4
+ mma_accum_c_id % 2
+ mma_accum_c_id // 4 * 8,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(7)
def test_gemm_mma_m8n8k4_row_row_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16")
B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16")
C_np = np.zeros([16, 16]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32"))
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="int8")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "int8", scope="local")
MultiB = T.decl_buffer([4], "int8", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
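    # m8n8k16 int8 fragment layout: each thread packs four consecutive int8
    # values (one 32-bit register) from row tx // 4, columns tx % 4 * 4 .. + 3.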
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 4]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 4]
T.evaluate(
T.ptx_mma(
"m8n8k16",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
# This test uses mma instructions that are not available on NVCC 10.1.
# Failure occurs during the external call to nvcc, when attempting to
# generate the .fatbin file.
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k16_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
C_np = np.zeros([8, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="uint8")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "int8", scope="local")
MultiB = T.decl_buffer([4], "uint8", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 4]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 4]
T.evaluate(
T.ptx_mma(
"m8n8k16",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
# This test uses mma instructions that are not available on NVCC 10.1.
# Failure occurs during the external call to nvcc, when attempting to
# generate the .fatbin file.
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k16_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8")
C_np = np.zeros([8, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k32_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 32], dtype="int4")
B = T.match_buffer(b, [8, 32], dtype="int4")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int4", scope="local")
MultiB = T.decl_buffer([8], "int4", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(8):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 8]
for mma_multi_b_col in T.vectorized(8):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 8]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"int4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
# This test uses mma instructions that are not available on NVCC 10.1.
# Failure occurs during the external call to nvcc, when attempting to
# generate the .fatbin file.
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k32_row_col_s4s4s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4s4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([8, 32], "int4", ctx)
B_tvm = tvm.nd.empty([8, 32], "int4", ctx)
C_tvm = tvm.nd.empty([8, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
# Currently the correctness is not checked.
# TODO: add correctness checking here.
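    # A full check would mirror the int8 tests above. A minimal sketch,
    # assuming a hypothetical pack_int4 helper (NumPy has no 4-bit dtype, so
    # host data must be packed two values per byte before the copy):
    #
    #   A_np = np.random.randint(-8, 8, [8, 32]).astype("int8")
    #   B_np = np.random.randint(-8, 8, [8, 32]).astype("int8")
    #   A_tvm.copyfrom(pack_int4(A_np))  # pack_int4 is hypothetical
    #   B_tvm.copyfrom(pack_int4(B_np))
    #   cuda_mod(A_tvm, B_tvm, C_tvm)
    #   golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
    #   tvm.testing.assert_allclose(C_tvm.numpy(), golden)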
@T.prim_func
def gemm_mma_m8n8k32_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 32], dtype="int4")
B = T.match_buffer(b, [8, 32], dtype="uint4")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int4", scope="local")
MultiB = T.decl_buffer([8], "uint4", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(8):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 8]
for mma_multi_b_col in T.vectorized(8):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 8]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
# This test uses mma instructions that are not available on NVCC 10.1.
# Failure occurs during the external call to nvcc, when attempting to
# generate the .fatbin file.
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k32_row_col_s4u4s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4u4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([8, 32], "int4", ctx)
B_tvm = tvm.nd.empty([8, 32], "uint4", ctx)
C_tvm = tvm.nd.empty([8, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
# Currently the correctness is not checked.
# TODO: add correctness checking here.
@T.prim_func
def gemm_mma_m16n8k8_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [8, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([2], "float16", scope="local")
Accum = T.decl_buffer([4], "float32", scope="local")
for i in range(4):
Accum[i] = T.float32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 2 * 8, (tx % 32) % 4 * 2 + mma_multi_a_col % 2
]
    # MultiB holds only two elements per thread for m16n8k8, so the copy
    # loop must not run past its bounds.
    for mma_multi_b_col in T.vectorized(2):
        MultiB[mma_multi_b_col] = B[
            (tx % 32) // 4, (tx % 32) % 4 * 2 + mma_multi_b_col
        ]
T.evaluate(
T.ptx_mma(
"m16n8k8",
"row",
"col",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(4):
C[(tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2] = Accum[
mma_accum_c_id
]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k8_row_col_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k8_row_col_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 8]).astype("float16")
C_np = np.zeros([16, 8]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [8, 16], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([4], "float16", scope="local")
for i in range(4):
        Accum[i] = T.float16(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8,
(tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp16",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float16",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_fp16fp16fp16():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp16)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16")
C_np = np.zeros([16, 8]).astype("float16")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float16"), B_np.astype("float16").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [8, 16], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([4], "float32", scope="local")
for i in range(4):
Accum[i] = T.float32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8,
(tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16")
C_np = np.zeros([16, 8]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="int8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int8", scope="local")
MultiB = T.decl_buffer([4], "int8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="uint8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int8", scope="local")
MultiB = T.decl_buffer([4], "uint8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k32_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 32], dtype="int8")
B = T.match_buffer(b, [8, 32], dtype="int8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([16], "int8", scope="local")
MultiB = T.decl_buffer([8], "int8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(16):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16,
]
for mma_multi_b_col in range(8):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16,
]
T.evaluate(
T.ptx_mma(
"m16n8k32",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k32_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 32]).astype("int8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k32_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 32], dtype="int8")
B = T.match_buffer(b, [8, 32], dtype="uint8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([16], "int8", scope="local")
MultiB = T.decl_buffer([8], "uint8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(16):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16,
]
for mma_multi_b_col in range(8):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16,
]
T.evaluate(
T.ptx_mma(
"m16n8k32",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k32_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 32]).astype("uint8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k64_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 64], dtype="int4")
B = T.match_buffer(b, [8, 64], dtype="int4")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([32], "int4", scope="local")
MultiB = T.decl_buffer([16], "int4", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(32):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8,
(tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32,
]
for mma_multi_b_col in range(16):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32,
]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"int4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k64_row_col_s4s4s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4s4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 64], "int4", ctx)
B_tvm = tvm.nd.empty([8, 64], "int4", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
# Currently the correctness is not checked.
# TODO: add correctness checking here.
@T.prim_func
def gemm_mma_m16n8k64_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 64], dtype="int4")
B = T.match_buffer(b, [8, 64], dtype="uint4")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([32], "int4", scope="local")
MultiB = T.decl_buffer([16], "uint4", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(32):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8,
(tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32,
]
for mma_multi_b_col in range(16):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32,
]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k64_row_col_s4u4s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4u4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 64], "int4", ctx)
B_tvm = tvm.nd.empty([8, 64], "uint4", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
# Currently the correctness is not checked.
# TODO: add correctness checking here.
@T.prim_func
def gemm_mma_m16n8k256_row_col_b1b1s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 256], dtype="int1")
B = T.match_buffer(b, [8, 256], dtype="int1")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([128], "int1", scope="local")
MultiB = T.decl_buffer([64], "int1", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(128):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 64 // 32 * 8,
(tx % 32) % 4 * 32 + mma_multi_a_col % 32 + mma_multi_a_col // 64 * 128,
]
    # MultiB holds 64 int1 elements per thread; load all of them.
    for mma_multi_b_col in range(64):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 32 + mma_multi_b_col % 32 + mma_multi_b_col // 32 * 128,
]
T.evaluate(
T.ptx_mma(
"m16n8k256",
"row",
"col",
"int1",
"int1",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
"xor",
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k256_row_col_b1b1s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k256_row_col_b1b1s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 256], "int1", ctx)
B_tvm = tvm.nd.empty([8, 256], "int1", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
# Currently the correctness is not checked.
# TODO: add correctness checking here.
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_ptx_mma_sp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
def gen_2in4_mask(m: int, n: int):
assert n % 4 == 0
return np.array(
[[np.sort(np.random.choice(4, 2, replace=False)) for _ in range(n // 4)] for _ in range(m)]
).astype("uint8")
def get_dense_mat_by_mask(val, mask):
m, n_chunks, _ = mask.shape
val = val.reshape(m, n_chunks, 2)
ret = np.zeros((m, n_chunks, 4)).astype(val.dtype)
for i in range(m):
for j in range(n_chunks):
for k in range(2):
ret[i, j, mask[i, j, k]] = val[i, j, k]
return ret.reshape(m, n_chunks * 4)
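# The two helpers above implement 2:4 structured sparsity: each chunk of four
# dense columns keeps exactly two values, and `mask` records their positions.
# For example, with val = [[1, 2]] and mask = [[[0, 3]]],
# get_dense_mat_by_mask(val, mask) returns [[1, 0, 0, 2]].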
@T.prim_func
def mma_sp_m16n8k16_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [16, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
metadata = T.match_buffer(_metadata, [8], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([4], "float16", scope="local")
multi_b = T.decl_buffer([4], "float16", scope="local")
accum = T.decl_buffer([4], "float16", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(4):
multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
for i in range(4):
multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
meta_local[0] = metadata[tx // 4]
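    # All four threads of a quad load the same 32-bit metadata word, which
    # packs the 2:4 sparsity column indices for the rows this quad computes.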
T.evaluate(
T.ptx_mma_sp(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp16",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float16",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k16_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [16, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
metadata = T.match_buffer(_metadata, [8], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([4], "float16", scope="local")
multi_b = T.decl_buffer([4], "float16", scope="local")
accum = T.decl_buffer([4], "float32", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
        accum[i] = T.float32(0)
for i in range(4):
multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
for i in range(4):
multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
meta_local[0] = metadata[tx // 4]
T.evaluate(
T.ptx_mma_sp(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp32",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float32",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k32_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [32, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
metadata = T.match_buffer(_metadata, [16], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([8], "float16", scope="local")
multi_b = T.decl_buffer([8], "float16", scope="local")
accum = T.decl_buffer([4], "float16", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(8):
multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, i // 4 * 8 + tx % 4 * 2 + i % 2]
for i in range(8):
multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
meta_local[0] = metadata[tx // 4 * 2 + tx % 2]
T.evaluate(
T.ptx_mma_sp(
"m16n8k32",
"row",
"col",
"fp16",
"fp16",
"fp16",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float16",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k32_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [32, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
metadata = T.match_buffer(_metadata, [16], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([8], "float16", scope="local")
multi_b = T.decl_buffer([8], "float16", scope="local")
accum = T.decl_buffer([4], "float32", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
        accum[i] = T.float32(0)
for i in range(8):
multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, i // 4 * 8 + tx % 4 * 2 + i % 2]
for i in range(8):
multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
meta_local[0] = metadata[tx // 4 * 2 + tx % 2]
T.evaluate(
T.ptx_mma_sp(
"m16n8k32",
"row",
"col",
"fp16",
"fp16",
"fp32",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float32",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k16_f16():
def get_meta_m16n8k16_half(mask):
assert mask.shape == (16, 4, 2)
mask = mask.reshape(16, 8)
ret = np.zeros((8,)).astype("uint32")
for i in range(8):
base = 1
for blk in range(2):
for j in range(8):
ret[i] |= int(mask[blk * 8 + i, j]) * base
base = base << 2
return ret
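    # Each uint32 packs sixteen 2-bit column indices: entry j of row i sits
    # at bits [2j, 2j+1], followed by the same eight entries of row i + 8.
    # For example, mask entries [1, 3] contribute 1 | (3 << 2) = 0b1101.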
for out_dtype in ["float16", "float32"]:
func = mma_sp_m16n8k16_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k16_f16f16f32
sch = tvm.tir.Schedule(func)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
B_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
mask = gen_2in4_mask(16, 16)
A_dense_np = get_dense_mat_by_mask(A_np, mask)
C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
meta = get_meta_m16n8k16_half(mask)
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
meta_tvm = tvm.nd.array(meta, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k32_f16():
def get_meta_m16n8k32_half(mask):
assert mask.shape == (16, 8, 2)
mask = mask.reshape(16, 2, 8)
ret = np.zeros((8, 2)).astype("uint32")
for i in range(8):
for k in range(2):
base = 1
for blk in range(2):
for j in range(8):
ret[i, k] |= int(mask[blk * 8 + i, k, j]) * base
base = base << 2
return ret.reshape(16)
for out_dtype in ["float16", "float32"]:
func = mma_sp_m16n8k32_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k32_f16f16f32
sch = tvm.tir.Schedule(func)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [32, 8]).astype("float16")
mask = gen_2in4_mask(16, 32)
A_dense_np = get_dense_mat_by_mask(A_np, mask)
C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
meta = get_meta_m16n8k32_half(mask)
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
meta_tvm = tvm.nd.array(meta, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
test_mma_sp_m16n8k16_f16()
test_mma_sp_m16n8k32_f16()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_renew_defs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.tir.buffer import Buffer
from tvm.tir.function import PrimFunc
from tvm.tir.stmt import Block
def _check_func_signature_remap(lhs: PrimFunc, rhs: PrimFunc):
assert lhs != rhs
for x, y in zip(lhs.params, rhs.params):
assert x != y
assert lhs.buffer_map[x] != rhs.buffer_map[y]
def _check_buffer_decl(lhs: Buffer, rhs: Buffer):
assert lhs != rhs
assert lhs.data != rhs.data
def _check_block_signature_remap(lhs: Block, rhs: Block):
assert lhs != rhs
for x, y in zip(lhs.iter_vars, rhs.iter_vars):
assert x != y
assert x.var != y.var
for x, y in zip(lhs.alloc_buffers, rhs.alloc_buffers):
_check_buffer_decl(x, y)
for x, y in zip(lhs.match_buffers, rhs.match_buffers):
assert x != y
_check_buffer_decl(x.buffer, y.buffer)
def test_simple():
@T.prim_func
# Buffer A should be remapped
def elementwise(A: T.Buffer[(128, 128), "float32"]):
# Buffer B should be remapped
B = T.alloc_buffer((128, 128), "float32")
# i, j should be remapped
for i, j in T.grid(128, 128):
with T.block("B"):
# vi, vj should be remapped
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * 2.0
f1 = elementwise
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
_check_func_signature_remap(f1, f2)
# check root block
_check_block_signature_remap(f1.body.block, f2.body.block)
# check remap of i
assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
# check remap of j
assert f1.body.block.body.body.loop_var != f2.body.block.body.body.loop_var
# check inner block
def _get_block(f):
return f.body.block.body.body.body.block
_check_block_signature_remap(_get_block(f1), _get_block(f2))
def test_match_buffer():
@T.prim_func
# A and B should be remapped
def func_match_buffer(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]):
with T.block("root"):
s = T.var("int32")
e = T.var("int32")
# A0 should be remapped
A0 = T.match_buffer(
A[0:128, 0:128],
shape=(128, 128),
dtype="float32",
# s and e should be remapped
strides=[s, s],
elem_offset=e,
)
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A0[vi, vj] * 2.0
f1 = func_match_buffer
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
_check_func_signature_remap(f1, f2)
_check_block_signature_remap(f1.body.block, f2.body.block)
assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
def _get_block(f):
return f.body.block
block1 = _get_block(f1)
block2 = _get_block(f2)
_check_block_signature_remap(block1, block2)
matched_buffer1 = block1.match_buffers[0].buffer
matched_buffer2 = block2.match_buffers[0].buffer
# Stride var s should be remapped
assert matched_buffer1.strides[0] != matched_buffer2.strides[0]
assert matched_buffer1.strides[1] != matched_buffer2.strides[1]
# s should be only remapped once
assert matched_buffer1.strides[0] == matched_buffer1.strides[1]
assert matched_buffer2.strides[0] == matched_buffer2.strides[1]
# Element-offset var e should be remapped
assert matched_buffer1.elem_offset != matched_buffer2.elem_offset
def test_undefined_buffer():
@T.prim_func
def access_alloc():
# Buffer A should be remapped
A_data = T.allocate([128], "float16", "global")
A = T.buffer_decl(shape=[128], dtype="float16", data=A_data)
# check if buffer var also get remapped
T.evaluate(A.data)
for i in range(128):
A[i] = A[i] + T.float16(1.0)
f1 = access_alloc
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
assert f1.body.buffer_var != f2.body.buffer_var
def _get_buffer_store_buffer(f):
return f.body.body[1].body.buffer
_check_buffer_decl(_get_buffer_store_buffer(f1), _get_buffer_store_buffer(f2))
def test_symbolic_func():
@T.prim_func
def symbolic_func(a: T.handle, b: T.handle, n: T.int32):
m = T.var("int32")
A = T.match_buffer(a, (n, m))
B = T.match_buffer(b, (n, m * 2))
for i, j in T.grid(n, m):
B[i, j * 2] = A[i, j]
B[i, j * 2 + 1] = A[i, j]
f1 = symbolic_func
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from typing import List
import pytest
import tvm
import tvm.testing
from tvm.tir.function import TensorIntrin
from tvm.tir.tensor_intrin.x86 import dot_product_16x4_u8i8i32_desc
from tvm.tir.tensor_intrin.cuda import (
WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
)
from tvm.tir import Evaluate, For, ForKind, IndexMap, Var, decl_buffer, floordiv, floormod, Schedule
from tvm.tir.analysis import expr_deep_equal
from tvm.tir.schedule.analysis import (
get_auto_tensorize_mapping_info,
suggest_index_map,
get_tensorize_loop_mapping,
TensorizeInfo,
)
from tvm.script import tir as T
from tvm.tir.stmt_functor import pre_order_visit
from tvm.meta_schedule.testing import te_workload
from tvm.te import create_prim_func
def _make_vars(*args: str) -> List[Var]:
return [Var(arg, dtype="int32") for arg in args]
def _make_loops(loop_vars: List[Var], extents: List[int]) -> List[For]:
assert len(loop_vars) == len(extents)
return [
For(
loop_var=loop_var,
min_val=0,
extent=extent,
kind=ForKind.SERIAL,
body=Evaluate(0),
)
for loop_var, extent in zip(loop_vars, extents)
]
def test_suggest_index_map_simple():
i, j = _make_vars("i", "j")
index_map = suggest_index_map(
buffer=decl_buffer(shape=[8, 256]),
indices=[
floordiv(i, 16) * 4 + floordiv(j, 16),
floormod(i, 16) * 16 + floormod(j, 16),
],
loops=_make_loops(
loop_vars=[i, j],
extents=[32, 64],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda x, y: [
floordiv(x, 4),
floordiv(y, 16),
floormod(x, 4),
floormod(y, 16),
],
)
assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_bijective():
i, j = _make_vars("i", "j")
index_map = suggest_index_map(
buffer=decl_buffer(shape=[8]),
indices=[floormod(j, 4) * 2 + i],
loops=_make_loops(
loop_vars=[i, j],
extents=[2, 32],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda x: [
floormod(x, 2),
floordiv(x, 2),
],
)
assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_winograd():
"""use case in winograd conv where the indices are complicated"""
fused_outer, i3_3_fused, i4_0, i4_1 = _make_vars("fused_outer", "i3_3_fused", "i4_0", "i4_1")
eps = floordiv(fused_outer, 336) * 2 + floordiv(floormod(fused_outer, 16), 8)
nu = floordiv(floormod(fused_outer, 336), 112) * 2 + floordiv(floormod(fused_outer, 8), 4)
co = floormod(fused_outer, 4) * 32 + i3_3_fused
ci = (i4_0 * 32) + i4_1
buffer = decl_buffer(shape=[6, 6, 128, 128])
index_map = suggest_index_map(
buffer=buffer,
indices=[eps, nu, co, ci],
loops=_make_loops(
loop_vars=[fused_outer, i3_3_fused, i4_0, i4_1],
extents=[1008, 32, 4, 32],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda i0, i1, i2, i3: (
floordiv(i0, 2),
floordiv(i1, 2),
floormod(i0, 2),
floormod(((i1 * 4) + floordiv(i2, 32)), 8),
floormod(i2, 32),
floordiv(i3, 32),
floormod(i3, 32),
)
)
assert index_map.is_equivalent_to(expected_index_map)
inverse_index_map = index_map.inverse(buffer.shape)
expected_inverse_index_map = IndexMap.from_func(
lambda i0, i1, i2, i3, i4, i5, i6: (
((i0 * 2) + i2),
((i1 * 2) + floordiv(((i3 * 32) + i4), 128)),
floormod(((i3 * 32) + i4), 128),
((i5 * 32) + i6),
)
)
assert inverse_index_map.is_equivalent_to(expected_inverse_index_map)
@tvm.script.ir_module
class DenseVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
for i0, i1, i2 in T.grid(1024, 1024, 1024):
with T.block("compute"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
with T.block("conv2d_NCHWc_int8"):
(
n,
oc_chunk,
oh,
ow,
oc_block,
kh,
kw,
ic_outer,
ic_f_inner,
ic_s_inner,
) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
def collect_loops(prim_func):
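    """Collect every tvm.tir.For node in the PrimFunc body, in pre-order."""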
loops = []
def callback(node):
if isinstance(node, tvm.tir.For):
loops.append(node)
return True
pre_order_visit(prim_func.body, callback)
return loops
def test_get_tensorize_loop_mapping_dense_vnni():
s = Schedule(DenseVNNIModule)
block = s.get_block("compute")
info = get_tensorize_loop_mapping(s, block, dot_product_16x4_u8i8i32_desc)
assert isinstance(info, TensorizeInfo)
    desc_loop_to_sref = {v: k for k, v in info.loop_map.items()}
desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
_, loop_j, loop_k = s.get_loops(block)
assert desc_loops[0] in desc_loop_to_sref and desc_loops[1] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(loop_j)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(loop_k)
def test_get_tensorize_loop_mapping_conv2d_nchwc_vnni():
s = Schedule(Conv2dNCHWcVNNIModule)
block = s.get_block("conv2d_NCHWc_int8")
info = get_tensorize_loop_mapping(s, block, dot_product_16x4_u8i8i32_desc)
    desc_loop_to_sref = {v: k for k, v in info.loop_map.items()}
desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
    # i4 corresponds to the inner output-channel axis of the NCHWc output tensor
# for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
_, _, _, _, i4, _, _, _, _, i9 = s.get_loops(block)
assert desc_loops[0] in desc_loop_to_sref and desc_loops[1] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i4)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i9)
def test_get_tensorize_loop_mapping_matmul_mma():
@T.prim_func
def matmul_16x16x16xf16f16f16_desc(
A: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
B: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
C: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:16, 0:16], A[0:16, 0:16], B[0:16, 0:16])
T.writes(C[0:16, 0:16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
matmul = create_prim_func(
te_workload.matmul_relu(
n=512,
m=512,
k=512,
)
)
s = Schedule(matmul)
block = s.get_block("C")
i0, i1, i2 = s.get_loops(block)
desc_loops = collect_loops(matmul_16x16x16xf16f16f16_desc)
for do_reorder in [False, True]:
# Mapping should be invariant to the loop permutation
if do_reorder:
s.reorder(i2, i0, i1)
info = get_tensorize_loop_mapping(s, block, matmul_16x16x16xf16f16f16_desc)
assert info is not None
        desc_loop_to_sref = {v: k for k, v in info.loop_map.items()}
for i in range(3):
assert desc_loops[i] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i0)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i1)
assert s.get(desc_loop_to_sref[desc_loops[2]]) == s.get(i2)
def test_get_tensorize_loop_mapping_padding_matmul():
matmul = create_prim_func(
te_workload.matmul_relu(
n=127,
m=256,
k=65,
in_dtype="float16",
out_dtype="float16",
)
)
s = Schedule(matmul)
block = s.get_block("C")
desc = TensorIntrin.get(WMMA_SYNC_16x16x16_f16f16f16_INTRIN).desc
info = get_tensorize_loop_mapping(s, block, desc, allow_padding=True)
assert info is not None
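    # The WMMA desc tiles each block iter by 16: n = 127 needs 1 element of padding
    # to reach 128, m = 256 already divides evenly, and k = 65 needs 15 to reach 80.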
expected_padding = [1, 0, 15]
actual_padding = info.block_iter_paddings
assert actual_padding is not None
assert len(actual_padding) == len(expected_padding)
for actual, expected in zip(actual_padding, expected_padding):
assert actual == expected
def check_index_map(workload, block_name, intrin_name, expected_index_map):
s = Schedule(workload)
block = s.get_block(block_name)
desc_func = TensorIntrin.get(intrin_name).desc
info = get_auto_tensorize_mapping_info(s, block, desc_func)
if expected_index_map is None:
assert info is None
return
assert len(info.mappings) == 1
assert IndexMap.from_func(expected_index_map).is_equivalent_to(info.mappings[0])
def test_get_auto_tensorize_mapping_info_conv2d():
conv2d = create_prim_func(
te_workload.conv2d_nhwc(4, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
)
check_index_map(
conv2d,
"conv2d_nhwc",
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
)
def test_get_auto_tensorize_mapping_info_conv2d_unit_batch():
conv2d = create_prim_func(
te_workload.conv2d_nhwc(1, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
)
check_index_map(
conv2d,
"conv2d_nhwc",
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
)
@pytest.mark.parametrize("b,m,n,k", [(1, 512, 512, 512), (16, 32, 32, 32)])
def test_get_auto_tensorize_mapping_info_batch_matmul(b, m, n, k):
matmul = create_prim_func(
te_workload.batch_matmul_nkkm(b, m, n, k, in_dtype="float16", out_dtype="float32")
)
check_index_map(
matmul, "Z", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, lambda b, m, n, k: (b, m, n, k)
)
@pytest.mark.parametrize(
"n,m,k,expected",
[
(
512,
512,
512,
lambda n, m, k: (
n,
m,
k,
),
),
(1, 32, 32, lambda n, m, k: (n, m, k)),
],
)
def test_get_auto_tensorize_mapping_info_matmul(n, m, k, expected):
matmul = create_prim_func(te_workload.matmul(n, m, k, in_dtype="float16", out_dtype="float32"))
check_index_map(matmul, "C", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, expected)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_block_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule import DepKind
from tvm.tir.stmt_functor import post_order_visit
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def war_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
# pylint: disable=invalid-name
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
result = None
def f_visit(node):
nonlocal result
if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
result = node
func = s.mod["main"]
post_order_visit(func.body, f_visit)
assert result is not None and isinstance(result, tvm.tir.Block)
return s.get_sref(result)
def test_elementwise_dependency():
s = tir.ScheduleState(elementwise, debug_mask="all")
root = _get_block(s, "root")
block_b = _get_block(s, "B")
block_c = _get_block(s, "C")
# Check get_deps_by_src
(dep,) = s.get_block_scope(root).get_deps_by_src(block_b)
assert dep.src.same_as(block_b)
assert dep.dst.same_as(block_c)
assert dep.kind == DepKind.RAW
# Check get_deps_by_dst
(dep,) = s.get_block_scope(root).get_deps_by_dst(block_c)
assert dep.src.same_as(block_b)
assert dep.dst.same_as(block_c)
assert dep.kind == DepKind.RAW
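# Hedged sketch (not part of the original tests): the same BlockScope API can be
# used to dump every dependency edge in a scope, e.g. when debugging a schedule.
def _demo_dump_deps():
    s = tir.ScheduleState(elementwise, debug_mask="all")
    scope = s.get_block_scope(_get_block(s, "root"))
    for name in ("B", "C"):
        for dep in scope.get_deps_by_src(_get_block(s, name)):
            print(dep.src, "->", dep.dst, dep.kind)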
def test_matmul_dependency():
s = tir.ScheduleState(matmul, debug_mask="all")
root = _get_block(s, "root")
init = _get_block(s, "init")
update = _get_block(s, "update")
# Check get_deps_by_src
p0, p1 = s.get_block_scope(root).get_deps_by_src(init)
assert p0.src.same_as(init)
assert p0.dst.same_as(update)
assert p1.src.same_as(init)
assert p1.dst.same_as(update)
assert (p0.kind == DepKind.RAW and p1.kind == DepKind.WAW) or (
p0.kind == DepKind.WAW and p1.kind == DepKind.RAW
)
# Check get_deps_by_dst
p0, p1 = s.get_block_scope(root).get_deps_by_dst(update)
assert p0.src.same_as(init)
assert p0.dst.same_as(update)
assert p1.src.same_as(init)
assert p1.dst.same_as(update)
assert (p0.kind == DepKind.RAW and p1.kind == DepKind.WAW) or (
p0.kind == DepKind.WAW and p1.kind == DepKind.RAW
)
def test_war_dependency():
s = tir.ScheduleState(war_dependency, debug_mask="all")
root = _get_block(s, "root")
block_c = _get_block(s, "C")
block_b = _get_block(s, "B")
# Check get_deps_by_src
(dep,) = s.get_block_scope(root).get_deps_by_src(block_c)
assert dep.src.same_as(block_c)
assert dep.dst.same_as(block_b)
assert dep.kind == DepKind.WAR
# Check get_deps_by_dst
(dep,) = s.get_block_scope(root).get_deps_by_dst(block_b)
assert dep.src.same_as(block_c)
assert dep.dst.same_as(block_b)
assert dep.kind == DepKind.WAR
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_blockize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
@T.prim_func
def single_elementwise(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]):
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
# fmt: on
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
def test_blockize_outer():
@T.prim_func
def after_blockize_outer(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
) -> None:
with T.block("blockized_B"):
vio = T.axis.spatial(1, 0)
vjo = T.axis.spatial(1, 0)
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
func = single_elementwise
s = tir.Schedule(func, debug_mask="all")
x, _ = s.get_loops(s.get_block("B"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_outer)
verify_trace_roundtrip(sch=s, mod=func)
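# Hedged sketch (not in the original tests): `blockize` returns the newly created
# outer block, which can then be scheduled like any other block.
def _demo_blockize_return_value():
    s = tir.Schedule(single_elementwise, debug_mask="all")
    x, _ = s.get_loops(s.get_block("B"))
    outer = s.blockize(x)
    assert s.get(outer).name_hint == "blockized_B"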
def test_blockize_inner():
@T.prim_func
def after_blockize_inner(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
) -> None:
for i in T.serial(128):
with T.block("blockized_B"):
vi = T.axis.spatial(128, i)
vjo = T.axis.spatial(1, 0)
for j in T.serial(128):
with T.block("B"):
vj = T.axis.remap("S", [j])
B[vi, vj] = A[vi, vj] * 2.0
func = single_elementwise
s = tir.Schedule(func, debug_mask="all")
_, y = s.get_loops(s.get_block("B"))
s.blockize(y)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_inner)
verify_trace_roundtrip(sch=s, mod=func)
def test_two_elementwise_blockize_reverse_compute_at():
@T.prim_func
def before_blockize_rca(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(8, 8):
with T.block("B_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i])
B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi = T.axis.spatial(128, i * 16 + ax0)
vj = T.axis.spatial(128, j * 16 + ax1)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def after_blockize_rca(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(8, 8):
with T.block("B_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i])
B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0
with T.block("C_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [ax0, ax1])
T.reads(B[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(C[vi * 16 + vi_i, vj * 16 + vj_i])
C[vi * 16 + vi_i, vj * 16 + vj_i] = B[vi * 16 + vi_i, vj * 16 + vj_i] + 1.0
func = before_blockize_rca
s = tir.Schedule(func, debug_mask="all")
_, _, x, _ = s.get_loops(s.get_block("C"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_rca)
verify_trace_roundtrip(sch=s, mod=func)
def test_two_elementwise_blockize_compute_at():
@T.prim_func
def before_blockize_compute_at(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0 in T.grid(8, 8):
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + ax0)
vj = T.axis.spatial(128, j_0 * 16 + ax1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * 2.0
with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0
)
@T.prim_func
def after_blockize_compute_at(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0 in T.grid(8, 8):
with T.block("B_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(A[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [ax0, ax1])
T.reads(A[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
A[vi_o * 16 + vi_i, vj_o * 16 + vj_i] * 2.0
)
with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0
)
func = before_blockize_compute_at
s = tir.Schedule(func, debug_mask="all")
_, _, x, _ = s.get_loops(s.get_block("B"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_compute_at)
verify_trace_roundtrip(sch=s, mod=func)
def test_blockize_init_loops():
@T.prim_func
def rowsum(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128,), "float32"]) -> None:
for k, i in T.grid(128, 128):
with T.block("B"):
vk, vi = T.axis.remap("RS", [k, i])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def after_rowsum_blockize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128,), "float32"],
) -> None:
with T.block("blockized_B"):
vko = T.axis.R(1, 0)
vio = T.axis.S(1, 0)
with T.init():
for i1 in T.serial(0, 128):
with T.block("B_init"):
vi_init = T.axis.S(128, i1)
B[vi_init] = T.float32(0)
for i0, i1_1 in T.grid(128, 128):
with T.block("B"):
vk, vi = T.axis.remap("RS", [i0, i1_1])
B[vi] = B[vi] + A[vi, vk]
s = tir.Schedule(rowsum, debug_mask="all")
k, _ = s.get_loops(s.get_block("B"))
s.blockize(k)
tvm.ir.assert_structural_equal(s.mod["main"], after_rowsum_blockize)
verify_trace_roundtrip(sch=s, mod=rowsum)
def test_blockize_outer_int64_shape():
@T.prim_func
def single_elementwise_int64(
A: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
) -> None:
for i0, j0, i1, j1 in T.grid(T.int64(1), T.int64(8), T.int64(16), T.int64(16)):
with T.block("B"):
vi = T.axis.S(T.int64(16), i0 * T.int64(16) + i1)
vj = T.axis.S(T.int64(128), j0 * T.int64(16) + j1)
B[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def after_single_elementwise_int64_blockize(
A: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
) -> None:
for i0, j0 in T.grid(T.int64(1), T.int64(8)):
with T.block("B_o"):
vi_o = T.axis.spatial(T.int64(1), T.int64(0))
vj_o = T.axis.spatial(T.int64(8), j0)
for i1, j1 in T.grid(T.int64(16), T.int64(16)):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i1, j1])
B[vi_i, vj_o * T.int64(16) + vj_i] = A[
vi_i, vj_o * T.int64(16) + vj_i
] + T.float32(1)
s = tir.Schedule(single_elementwise_int64, debug_mask="all")
_, _, i1, _ = s.get_loops(s.get_block("B"))
s.blockize(i1)
tvm.ir.assert_structural_equal(s.mod["main"], after_single_elementwise_int64_blockize)
verify_trace_roundtrip(sch=s, mod=single_elementwise_int64)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_cache_index.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
########## Function before schedule ##########
@T.prim_func
def resize(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (1, 3, 40, 40))
B = T.match_buffer(b, (1, 3, 80, 80))
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
B[n, c, vi, vj] = A[n, c, vi // 4 + vj // 4, vj // 2]
@T.prim_func
def resize_cache_index(
A: T.Buffer[(1, 3, 40, 40), "float32"], B: T.Buffer[(1, 3, 80, 80), "float32"]
) -> None:
index_var_0 = T.alloc_buffer([80, 80], dtype="int32", strides=[1])
index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
for ax0, ax1 in T.grid(80, 80):
with T.block("index_0"):
v0 = T.axis.spatial(80, ax0)
v1 = T.axis.spatial(80, ax1)
T.reads()
T.writes(index_var_0[v0, v1])
index_var_0[v0, v1] = v0 // 4 + v1 // 4
for ax0 in T.serial(80):
with T.block("index_1"):
v0 = T.axis.spatial(80, ax0)
T.reads()
T.writes(index_var_1[v0])
index_var_1[v0] = v0 // 2
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(A[n, c, vi // 4 + vj // 4, vj // 2])
T.writes(B[n, c, vi, vj])
B[n, c, vi, vj] = A[n, c, index_var_0[vi, vj], index_var_1[vj]]
def test_inplace_cache_read():
sch = tvm.tir.Schedule(resize, debug_mask="all")
block = sch.get_block("A")
sch.cache_index(block, 0)
tvm.ir.assert_structural_equal(resize_cache_index, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=resize)
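# Note: `cache_index` hoists the index arithmetic of the block's read access into
# precomputed buffers (`index_var_0` and `index_var_1` above), so the repeated
# integer divisions are evaluated once outside the main loop nest.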
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_cache_read_write.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
########## Function before schedule ##########
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_shape_int64(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (T.int64(128), T.int64(128)))
B = T.alloc_buffer((T.int64(128), T.int64(128)))
C = T.match_buffer(c, (T.int64(128), T.int64(128)))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def func_nested_seq(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = 2.0
for i, j in T.grid(8, 8):
for x, y in T.grid(16, 16):
with T.block("B0"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("B1"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A[vi, vj] + B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def access_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = A[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
@T.prim_func
def func_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A[vi]
@T.prim_func
def func_multi_producer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
for i in range(128):
with T.block("A0"):
vi = T.axis.S(128, i)
A[vi] = 1.0
for i in range(128):
with T.block("A1"):
vi = T.axis.S(128, i)
A[vi] = 2.0
for i in range(128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi]
@T.prim_func
def func_with_block_predicate() -> None:
A = T.alloc_buffer((120))
B = T.alloc_buffer((120))
for i, j in T.grid(16, 8):
with T.block("producer"):
T.where(i * 8 + j < 120)
ax = T.axis.S(120, i * 8 + j)
A[ax] = 0.0
for i, j in T.grid(16, 8):
with T.block("consumer"):
T.where(i * 8 + j < 120)
ax = T.axis.S(120, i * 8 + j)
B[ax] = A[ax] + 1.0
@T.prim_func
def inplace_func(data_io: T.Buffer[(64), "int32"]):
data_1d = T.alloc_buffer([64], dtype="int32")
for i0 in T.serial(64):
with T.block("copy_in"):
v0 = T.axis.remap("S", [i0])
data_1d[v0] = data_io[v0]
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_1d[:64])
T.writes(data_1d[:64])
T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
for i0 in T.serial(64):
with T.block("copy_out"):
v0 = T.axis.remap("S", [i0])
data_io[v0] = data_1d[v0]
@T.prim_func
def inplace_call(data_io: T.Buffer[(64), "int32"]):
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_io[:64])
T.writes(data_io[:64])
T.evaluate(T.call_extern("call_impl", data_io.data, dtype=""))
@T.prim_func
def cache_read_nested_seq_target(
B: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
A = T.alloc_buffer([128, 128], dtype="float32")
A_global = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads()
T.writes(A[vi, vj])
A[vi, vj] = T.float32(2)
for i, j in T.grid(8, 8):
for x, y in T.grid(16, 16):
with T.block("B0"):
vi = T.axis.spatial(128, i * 16 + x)
vj = T.axis.spatial(128, j * 16 + y)
T.reads()
T.writes(B[vi, vj])
B[vi, vj] = T.float32(1)
for x, y in T.grid(16, 16):
with T.block("B1"):
vi = T.axis.spatial(128, i * 16 + x)
vj = T.axis.spatial(128, j * 16 + y)
T.reads(A[vi, vj], B[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] + B[vi, vj]
for ax0, ax1 in T.grid(128, 128):
with T.block("A_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(A[v0, v1])
T.writes(A_global[v0, v1])
A_global[v0, v1] = A[v0, v1]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = A_global[vi, vj] * T.float32(2)
########## Expected function after cache_read ##########
@T.prim_func
def cache_read_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
A_global = T.alloc_buffer((128, 128))
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A_global[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B_local"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_local[vi, vj] + 1.0
@T.prim_func
def cache_read_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
A_global = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
A_local = T.alloc_buffer((128, 128), scope="local")
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("A_local"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_local[vi, vj] = A[vi, vj]
for x, y in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A_local[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A_global[vi, vj] * 2.0
@T.prim_func
def cache_read_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
A_global = T.alloc_buffer((128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = A_global[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A_global.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A_global[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
@T.prim_func
def cache_read_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A_global[vi] = A[vi]
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A_global[vi] + 1.0
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A_global[vi]
@T.prim_func
def cache_read_multi_consumer_target() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("A"):
vi = T.axis.S(128, i)
A_global[vi] = A[vi]
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A_global[vi]
@T.prim_func
def continuous_cache_read(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
B_shared = T.alloc_buffer((128, 128), scope="shared")
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B_shared"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B_local"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = B_shared[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_local[vi, vj] + 1.0
@T.prim_func
def block_predicate_cache_read() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A[ax] = T.float32(0)
for ax0 in T.serial(120):
with T.block("A_shared"):
v0 = T.axis.spatial(120, ax0)
A_shared[v0] = A[v0]
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B[ax] = A_shared[ax] + T.float32(1)
@T.prim_func
def cache_read_shape_int64(var_A: T.handle, var_C: T.handle) -> None:
A = T.match_buffer(var_A, (T.int64(128), T.int64(128)), dtype="float32")
C = T.match_buffer(var_C, (T.int64(128), T.int64(128)), dtype="float32")
B = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
A_global = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
for ax0, ax1 in T.grid(T.int64(128), T.int64(128)):
with T.block("A_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(A[v0, v1])
T.writes(A_global[v0, v1])
A_global[v0, v1] = A[v0, v1]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A_global[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def cache_read_inplace(data_io: T.Buffer[64, "int32"]) -> None:
data_1d = T.alloc_buffer([64], dtype="int32")
data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io[v0])
T.writes(data_io_local[v0])
data_io_local[v0] = data_io[v0]
for i0 in T.serial(64):
with T.block("copy_in"):
v0 = T.axis.spatial(64, i0)
T.reads(data_io_local[v0])
T.writes(data_1d[v0])
data_1d[v0] = data_io_local[v0]
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_1d[0:64])
T.writes(data_1d[0:64])
T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
for i0 in T.serial(64):
with T.block("copy_out"):
v0 = T.axis.spatial(64, i0)
T.reads(data_1d[v0])
T.writes(data_io[v0])
data_io[v0] = data_1d[v0]
@T.prim_func
def cache_inplace_buffer(data_io: T.Buffer[64, "int32"]) -> None:
data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
data_io_global = T.alloc_buffer([64], dtype="int32")
data_io_global_1 = T.alloc_buffer([64], dtype="int32")
for ax0 in T.serial(64):
with T.block("data_io_global"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io[v0])
T.writes(data_io_global[v0])
data_io_global[v0] = data_io[v0]
for i0 in T.serial(1):
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_global[v0])
T.writes(data_io_local[v0])
data_io_local[v0] = data_io_global[v0]
with T.block("ext_call"):
T.reads(data_io_local[0:64])
T.writes(data_io_local[0:64])
T.evaluate(T.call_extern("call_impl", data_io_local.data, dtype=""))
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_local[v0])
T.writes(data_io_global_1[v0])
data_io_global_1[v0] = data_io_local[v0]
for ax0 in T.serial(64):
with T.block("data_io_global"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_global_1[v0])
T.writes(data_io[v0])
data_io[v0] = data_io_global_1[v0]
########## Expected function after cache_write ##########
@T.prim_func
def cache_write_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
B_global = T.alloc_buffer((128, 128), scope="local")
C_local = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B_global"):
vi, vj = T.axis.remap("SS", [i, j])
B_global[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C_local"):
vi, vj = T.axis.remap("SS", [i, j])
C_local[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cache_write_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
A_global = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
A_local = T.alloc_buffer((128, 128), scope="local")
B_global = T.alloc_buffer((128, 128))
for x, y in T.grid(16, 16):
with T.block("A_local"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_local[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_global[vi, vj] = A_local[vi, vj]
for x, y in T.grid(16, 16):
with T.block("B_global"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B_global[vi, vj] = A_global[vi, vj] + 1.0
for x, y in T.grid(16, 16):
with T.block("B_global"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = A_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def cache_write_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
D_global = T.alloc_buffer((128, 128), dtype="float16")
B_global = T.alloc_buffer((128, 128), dtype="float16")
C_global = T.alloc_buffer((128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(D_global[vi, vj])
D_global[vi, vj] = A[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B_global.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C_global[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = D_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = C_global[vi, vj]
@T.prim_func
def cache_write_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A_global"):
vi = T.axis.S(128, i * 16 + j)
A_global[vi] = 1.0
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = A_global[vi]
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A[vi]
@T.prim_func
def continuous_cache_write(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
B_shared = T.alloc_buffer((128, 128), scope="shared")
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = B_local[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_shared[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def block_predicate_cache_write_intermediate_buf() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A_shared[ax] = T.float32(0)
for ax0 in T.serial(120):
with T.block("A_shared"):
v0 = T.axis.spatial(120, ax0)
A[v0] = A_shared[v0]
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B[ax] = A[ax] + 1.0
@T.prim_func
def block_predicate_cache_write_output_buf() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
B_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A[ax] = T.float32(0)
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B_shared[ax] = A[ax] + T.float32(1)
for ax0 in T.serial(120):
with T.block("B_shared"):
v0 = T.axis.spatial(120, ax0)
B[v0] = B_shared[v0]
########## Testcases for cache_read ##########
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
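# `tvm.testing.parameter` registers a pytest fixture, so every test below that takes
# `use_block_name` runs twice: once passing block objects to the schedule primitives
# and once passing block names.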
def test_cache_read_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = sch.get_block("C")
if use_block_name:
cached_a = sch.cache_read("B", "A", "global")
cached_b = sch.cache_read("C", "B", "local")
else:
cached_a = sch.cache_read(block_b, 0, "global")
cached_b = sch.cache_read(block_c, 0, "local")
assert sch.get(cached_a) == sch.get(sch.get_block("A_global"))
assert sch.get(cached_b) == sch.get(sch.get_block("B_local"))
assert sch.get(block_b) == sch.get(sch.get_block("B"))
assert sch.get(block_c) == sch.get(sch.get_block("C"))
tvm.ir.assert_structural_equal(cache_read_elementwise, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_read_under_scope(use_block_name):
sch = tir.Schedule(access_under_scope, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "local")
sch.cache_read(block_c, 0, "global")
tvm.ir.assert_structural_equal(cache_read_under_scope, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=access_under_scope)
def test_cache_read_opaque_access(use_block_name):
sch = tir.Schedule(opaque_access, debug_mask="all")
block = "load_store" if use_block_name else sch.get_block("load_store")
sch.cache_read(block, 0, "global")
tvm.ir.assert_structural_equal(cache_read_opaque_access, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_cache_read_location(use_block_name):
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_read(block_b, 0, "global")
tvm.ir.assert_structural_equal(cache_read_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
    # Test that targeting a specific consumer block works.
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "global", consumer_blocks=[block_c])
tvm.ir.assert_structural_equal(cache_read_multi_consumer_target, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
    # Also test that specifying multiple consumers yields the same result as
    # leaving them unspecified.
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "global", consumer_blocks=[block_b, block_c])
tvm.ir.assert_structural_equal(cache_read_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
def test_continuous_cache_read(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_c, 0, "shared")
sch.cache_read(block_c, 0, "local")
tvm.ir.assert_structural_equal(continuous_cache_read, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_read_with_block_predicate(use_block_name):
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "consumer" if use_block_name else sch.get_block("consumer")
sch.cache_read(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_read, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
def test_cache_read_non_int32_shape(use_block_name):
sch = tir.Schedule(elementwise_shape_int64, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_read(block_b, 0, "global")
tvm.ir.assert_structural_equal(cache_read_shape_int64, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_shape_int64)
def test_cache_read_fail_multi_producer(use_block_name):
sch = tir.Schedule(func_multi_producer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 0, "global")
def test_cache_read_fail_index_out_of_bound(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 1, "global")
def test_cache_read_fail_invalid_storage_scope(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 0, "test_scope")
def test_inplace_cache_read():
sch = tvm.tir.Schedule(inplace_func, debug_mask="all")
block = sch.get_block("copy_in")
sch.cache_read(block, 0, "local", [block])
tvm.ir.assert_structural_equal(cache_read_inplace, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=inplace_func)
def test_cache_inplace():
    # cache_inplace may introduce WAR dependencies, which is expected, but it changes
    # the stage-pipeline property, so only verify the sref tree here.
debug_mask = tvm.tir.schedule.state.ScheduleDebugMask.VERIFY_SREF_TREE
sch = tvm.tir.Schedule(inplace_call, debug_mask=debug_mask)
block = sch.get_block("ext_call")
blocks = sch.cache_inplace(block, 0, "local")
block = sch.cache_read(blocks[0], 0, "global", [blocks[0]])
block = sch.cache_write(blocks[1], 0, "global")
tvm.ir.assert_structural_equal(cache_inplace_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=inplace_call, debug_mask=debug_mask)
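# Hedged note: debug_mask="all" enables both VERIFY_SREF_TREE and VERIFY_CACHED_FLAGS;
# the test above keeps only the former because cache_inplace intentionally breaks the
# stage-pipeline property that the cached-flags verifier checks.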
def test_cache_read_nested_seq(use_block_name):
sch = tir.Schedule(func_nested_seq, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_c, 0, "global", consumer_blocks=[block_c])
tvm.ir.assert_structural_equal(cache_read_nested_seq_target, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_nested_seq)
########## Testcases for cache_write ##########
def test_cache_write_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = sch.get_block("C")
cached_b = sch.cache_write("B" if use_block_name else block_b, 0, "local")
cached_c = sch.cache_write("C" if use_block_name else block_c, 0, "global")
assert sch.get(cached_b) == sch.get(sch.get_block("B_local"))
assert sch.get(cached_c) == sch.get(sch.get_block("C_global"))
assert sch.get(block_b) == sch.get(sch.get_block("B"))
assert sch.get(block_c) == sch.get(sch.get_block("C"))
tvm.ir.assert_structural_equal(cache_write_elementwise, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_write_under_scope(use_block_name):
sch = tir.Schedule(access_under_scope, debug_mask="all")
block_a = "A" if use_block_name else sch.get_block("A")
block_b = "B" if use_block_name else sch.get_block("B")
block_scope = sch.get_block("scope")
sch.cache_write(block_a, 0, "local")
sch.cache_write(block_b, 0, "global")
sch.cache_write(block_scope, 0, "global")
tvm.ir.assert_structural_equal(cache_write_under_scope, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=access_under_scope)
def test_cache_write_opaque_access(use_block_name):
sch = tir.Schedule(opaque_access, debug_mask="all")
block_store = "load_store" if use_block_name else sch.get_block("load_store")
block_opaque = "opaque" if use_block_name else sch.get_block("opaque")
block_match_buffer = "match_buffer" if use_block_name else sch.get_block("match_buffer")
sch.cache_write(block_store, 0, "global")
sch.cache_write(block_opaque, 0, "global")
sch.cache_write(block_match_buffer, 0, "global")
tvm.ir.assert_structural_equal(cache_write_opaque_access, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_cache_write_location(use_block_name):
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_a = "A" if use_block_name else sch.get_block("A")
sch.cache_write(block_a, 0, "global")
tvm.ir.assert_structural_equal(cache_write_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
def test_continuous_cache_write(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_write(block_b, 0, "shared")
sch.cache_write(block_b, 0, "local")
tvm.ir.assert_structural_equal(continuous_cache_write, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_write_with_block_predicate(use_block_name):
# cache write for intermediate buffer
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "producer" if use_block_name else sch.get_block("producer")
sch.cache_write(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_write_intermediate_buf, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
# cache write for external buffer
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "consumer" if use_block_name else sch.get_block("consumer")
sch.cache_write(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_write_output_buf, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
def test_cache_write_fail_multi_producer(use_block_name):
sch = tir.Schedule(func_multi_producer, debug_mask="all")
block_a0 = "A0" if use_block_name else sch.get_block("A0")
block_a1 = "A1" if use_block_name else sch.get_block("A1")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_a0, 0, "global")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_a1, 0, "global")
def test_cache_write_fail_index_out_of_bound(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_b, 1, "global")
def test_cache_write_fail_invalid_storage_scope(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_b, 0, "test_scope")
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_compute_at.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
@T.prim_func
def two_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
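# Expected IR after `compute_at(block="B", loop=i, preserve_unit_loops=True)` on
# two_elementwise above; exercised by test_compute_at_two_elementwise below. The
# producer "B" moves under the first loop of "C", keeping the unit loop ax0.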
@T.prim_func
def two_elementwise_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in range(0, 128):
for ax0, ax1 in T.grid(1, 128):
with T.block("B"):
vi = T.axis.S(128, i + ax0)
vj = T.axis.S(128, ax1)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_1(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(8, 8):
with T.block("C_outer"):
vi_o, vj_o = T.axis.remap("SS", [i, j])
T.reads([B[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16,
]])
T.writes([C[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("C_inner"):
vi = T.axis.S(128, vi_o * 16 + i_i)
vj = T.axis.S(128, vj_o * 16 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i0_0, i1_0 in T.grid(8, 8):
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i0_0 * 16 + ax0)
vj = T.axis.S(128, i1_0 * 16 + ax1)
B[vi, vj] = A[vi, vj] * 2.0
with T.block("C_outer"):
vi_o, vj_o = T.axis.remap("SS", [i0_0, i1_0])
T.reads([B[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16,
]])
T.writes([C[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16
]])
for i0_1, i1_1 in T.grid(16, 16):
with T.block("C_inner"):
vi = T.axis.S(128, vi_o * 16 + i0_1)
vj = T.axis.S(128, vj_o * 16 + i1_1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(8, 8):
with T.block("B_outer"):
vio, vjo = T.axis.remap("SS", [i_o, j_o])
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B_inner"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for i_o, j_o, i_i, j_i in T.grid(4, 4, 32, 32):
with T.block("C"):
vi = T.axis.S(128, i_o * 32 + i_i)
vj = T.axis.S(128, j_o * 32 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(8, 8):
with T.block("B_outer"):
vio, vjo = T.axis.remap("SS", [i_o, j_o])
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B_inner"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi = T.axis.S(128, i_o * 16 + ax0)
vj = T.axis.S(128, j_o * 16 + ax1)
T.reads([B[vi, vj]])
T.writes([C[vi, vj]])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(4, 4):
for ax0, ax1 in T.grid(2, 2):
with T.block("blockized_B"):
vio = T.axis.S(8, i_o * 2 + ax0)
vjo = T.axis.S(8, j_o * 2 + ax1)
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for i_i, j_i in T.grid(32, 32):
with T.block("C"):
vi = T.axis.S(128, i_o * 32 + i_i)
vj = T.axis.S(128, j_o * 32 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
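# cuda_matmul_0 pairs with cuda_matmul_0_after_compute_at below, while
# cuda_matmul_1 through cuda_matmul_5 form a chain: each prim_func is the
# expected IR after one more compute_at step applied to its predecessor, as
# exercised in order by the test_compute_at_cuda_matmul_* tests further down.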
@T.prim_func
def cuda_matmul_0(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for i, j, k in T.grid(2048, 2048, 2048):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0_4 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1_4 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0_4, v1_4] = C_local[v0_4, v1_4]
@T.prim_func
def cuda_matmul_0_after_compute_at(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for i, j, k in T.grid(4, 4, 2048):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cuda_matmul_1(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k_0 in T.serial(0, 256):
for k_1 in T.unroll(0, 8):
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k_0 * 8 + k_1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cuda_matmul_2(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k_0 in T.serial(0, 256):
for k_1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k_0 * 8 + k_1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k_0 * 8 + k_1)
with T.init():
C_local[vi, vj] = T.float32(0)
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_3(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = T.float32(0)
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_4(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for i, j in T.grid(8, 64):
with T.block("A_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, by * 64 + j)
A_shared[v0, v1] = A[v0, v1]
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_5(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=undefined-loop-variable
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for i, j in T.grid(8, 64):
with T.block("A_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, by * 64 + j)
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(8, 64):
with T.block("B_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, bx * 64 + j)
B_shared[v0, v1] = B[v0, v1]
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def tiled(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def tiled_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(0, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
for j_1 in T.serial(0, 16):
with T.block("C"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def tiled_trivial_binding(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [1, 128, 128], "float32")
B = T.alloc_buffer([1, 128, 128], "float32")
C = T.match_buffer(c, [1, 128, 128], "float32")
for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[0, vi, vj] = A[0, vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[0, vi, vj] = B[0, vi, vj] + 1.0
@T.prim_func
def tiled_trivial_binding_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [1, 128, 128], "float32")
B = T.alloc_buffer([1, 128, 128], "float32")
C = T.match_buffer(c, [1, 128, 128], "float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(0, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[0, vi, vj] = A[0, vi, vj] * 2.0
for j_1 in T.serial(0, 16):
with T.block("C"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
C[0, vi, vj] = B[0, vi, vj] + 1.0
@T.prim_func
def factorized(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], "float32")
B = T.match_buffer(b, [16], "float32")
B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
for i_i, k in T.grid(4, 16):
with T.block("B_rf"):
vi = T.axis.S(16, i_o * 4 + i_i)
vj, vk = T.axis.remap("SR", [j, k])
with T.init():
B_rf_local[vi, vj] = 0.0
B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
for i, k in T.grid(16, 16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + B_rf_local[vk, vi]
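# Expected IR after `reverse_compute_at(block="B", loop=i_o)` on factorized
# above: the final cross-thread reduction "B" moves under the threadIdx.x loop,
# shrinking its reduction axis to the 4 rfactor slots owned by each thread.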
@T.prim_func
def factorized_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], "float32")
B = T.match_buffer(b, [16], "float32")
B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
for i_i, k in T.grid(4, 16):
with T.block("B_rf"):
vi = T.axis.S(16, i_o * 4 + i_i)
vj = T.axis.S(16, j)
vk = T.axis.R(16, k)
with T.init():
B_rf_local[vi, vj] = 0.0
B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
for k in T.serial(0, 4):
with T.block("B"):
vi = T.axis.S(16, j)
vk = T.axis.R(16, i_o * 4 + k)
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + B_rf_local[vk, vi]
@T.prim_func
def not_all_compact_data_flow(a: T.handle, c: T.handle):
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 64):
with T.block("C_1"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2] = B[vi, vj * 2] + 1.0
with T.block("C_2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
@T.prim_func
def not_all_compact_data_flow_after_compute_at(a: T.handle, c: T.handle):
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 64):
for t in range(2):
with T.block("B"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j * 2 + t)
B[vi, vj] = A[vi, vj]
with T.block("C_1"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2] = B[vi, vj * 2] + 1.0
with T.block("C_2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
@T.prim_func
def fail_subtree_compact_dataflow(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in range(0, 128):
for j in range(0, 64):
with T.block("B_0"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(0, 64):
with T.block("B_1"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j + 64)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def fail_all_consumers_under_loop(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
D = T.match_buffer(d, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def fail_all_producers_under_loop(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.match_buffer(d, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + C[vi, vj]
@T.prim_func
def read_out_of_bound(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16], "float32")
B = T.alloc_buffer([16], "float32")
C = T.match_buffer(c, [16], "float32")
for i in T.serial(0, 16):
with T.block("B"):
v = T.axis.S(16, i)
B[v] = A[v]
for j in T.serial(0, 16):
with T.block("C"):
v = T.axis.S(16, j)
T.reads(B[v : v + 2])
C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
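# Expected IR after `compute_at(block="B", loop=j)`: because "C" reads
# B[v : v + 2], each consumer iteration needs two producer elements, and the
# generated T.where(j + i < 16) masks the tail access that would fall out of
# bounds.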
@T.prim_func
def read_out_of_bound_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16], "float32")
B = T.alloc_buffer([16], "float32")
C = T.match_buffer(c, [16], "float32")
for j in T.serial(0, 16):
for i in T.serial(0, 2):
with T.block("B"):
v = T.axis.S(16, j + i)
T.where(j + i < 16)
B[v] = A[v]
with T.block("C"):
v = T.axis.S(16, j)
T.reads([B[v : v + 2]])
C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
@T.prim_func
def multi_reduction(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(), "float32"]):
B = T.alloc_buffer((16, ), dtype="float32")
for i, k in T.grid(16, 16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] += A[vi, vk]
for k in T.grid(16):
with T.block("C"):
vk = T.axis.remap("R", [k])
with T.init():
C[()] = 0.0
C[()] += B[vk]
@T.prim_func
def multi_reduction_after_compute_at(
A: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(), "float32"],
):
B = T.alloc_buffer((16, ), dtype="float32")
for k in T.grid(16):
for kk in T.grid(16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [k, kk])
with T.init():
B[vi] = 0.0
B[vi] += A[vi, vk]
with T.block("C"):
vk = T.axis.remap("R", [k])
with T.init():
C[()] = 0.0
C[()] += B[vk]
@T.prim_func
def tiled_pooling_read_cache(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh, ww in T.grid(224, 224):
with T.block("cache"):
h, w = T.axis.remap("SS", [hh, ww])
cache[h, w] = X[h, w]
for hh_0, ww_0, hh_1, ww_1, khh, kww in T.grid(28, 28, 8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(Y[h, w], T.if_then_else(
T.likely(1 <= h + kh, dtype="bool") and \
T.likely(h + kh < 225, dtype="bool") and \
T.likely(1 <= w + kw, dtype="bool") and \
T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
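# Expected IR after computing "cache" at the ww_0 loop: each 8x8 output tile
# needs a 10x10 input window (8 + 3 - 1 per axis), and T.where guards the
# padded border where the window leaves the 224x224 extent.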
@T.prim_func
def tiled_pooling_read_cache_after_compute_at(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh_0, ww_0 in T.grid(28, 28):
for ax0, ax1 in T.grid(10, 10):
with T.block("cache"):
h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
T.where(1 <= hh_0 * 8 + ax0 and hh_0 * 8 + ax0 < 225 and 1 <= ww_0 * 8 + ax1 and ww_0 * 8 + ax1 < 225)
cache[h, w] = X[h, w]
for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(Y[h, w], T.if_then_else(
T.likely(1 <= h + kh, dtype="bool") and \
T.likely(h + kh < 225, dtype="bool") and \
T.likely(1 <= w + kw, dtype="bool") and \
T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
@T.prim_func
def non_uniform_tiled_conv(x: T.Buffer[(1, 3, 100, 100), "float32"],
w: T.Buffer[(16, 3, 3, 3), "float32"],
y: T.Buffer[(1, 16, 98, 98), "float32"]) -> None:
x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
for ax0, ax1, ax2, ax3 in T.grid(1, 3, 100, 100):
with T.block("cache"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
for h_o, w_o, n, c_o, h_i, w_i, c_i, kh, kw in T.grid(7, 7, 1, 16, 15, 15, 3, 3, 3):
with T.block("compute"):
nn = T.axis.spatial(1, 0)
cc = T.axis.spatial(16, c_o)
hh = T.axis.spatial(98, h_o * 15 + h_i)
ww = T.axis.spatial(98, w_o * 15 + w_i)
rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
with T.init():
y[nn, cc, hh, ww] = T.float32(0)
y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
x_global[nn, cc // 16 * 3 + rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
@T.prim_func
def non_uniform_tiled_conv_after_compute_at(x: T.Buffer[(1, 3, 100, 100), "float32"],
w: T.Buffer[(16, 3, 3, 3), "float32"],
y: T.Buffer[(1, 16, 98, 98), "float32"]) -> None:
x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
for h_o, w_o in T.grid(7, 7):
for ax0, ax1, ax2 in T.grid(3, 17, 17):
with T.block("cache"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(3, ax0)
v2 = T.axis.spatial(100, h_o * 15 + ax1)
v3 = T.axis.spatial(100, w_o * 15 + ax2)
T.where(h_o * 15 + ax1 < 100 and w_o * 15 + ax2 < 100)
x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
for n, c_o, h_i, w_i, c_i, kh, kw in T.grid(1, 16, 15, 15, 3, 3, 3):
with T.block("compute"):
nn = T.axis.spatial(1, 0)
cc = T.axis.spatial(16, c_o)
hh = T.axis.spatial(98, h_o * 15 + h_i)
ww = T.axis.spatial(98, w_o * 15 + w_i)
rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
with T.init():
y[nn, cc, hh, ww] = T.float32(0)
y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
x_global[nn, cc // 16 * 3 + rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
@T.prim_func
def concat_two_elemwise(x: T.Buffer[(16,), "float32"],
y: T.Buffer[(8,), "float32"],
T_concat: T.Buffer[(24,), "float32"]) -> None:
T_add_1 = T.alloc_buffer([16], dtype="float32")
T_add_2 = T.alloc_buffer([8], dtype="float32")
for i in T.serial(16):
with T.block("T_add_1"):
ax = T.axis.spatial(16, i)
T_add_1[ax] = x[ax] + T.float32(1)
for i in T.serial(8):
with T.block("T_add_2"):
ax = T.axis.spatial(8, i)
T_add_2[ax] = y[ax] + T.float32(2)
for i in T.serial(24):
with T.block("T_concat"):
ax = T.axis.spatial(24, i)
T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
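# Expected IR after computing both T_add blocks at the T_concat loop: the two
# producers are guarded by complementary predicates, T.where(i < 16) for the
# first input and T.where(16 <= i) for the second.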
@T.prim_func
def concat_two_elemwise_after_compute_at(x: T.Buffer[(16,), "float32"],
y: T.Buffer[(8,), "float32"],
T_concat: T.Buffer[(24,), "float32"]) -> None:
T_add_1 = T.alloc_buffer([16], dtype="float32")
T_add_2 = T.alloc_buffer([8], dtype="float32")
for i in T.serial(24):
with T.block("T_add_1"):
ax = T.axis.spatial(16, i)
T.where(i < 16)
T_add_1[ax] = x[ax] + T.float32(1)
with T.block("T_add_2"):
ax = T.axis.spatial(8, i - 16)
T.where(16 <= i)
T_add_2[ax] = y[ax] + T.float32(2)
with T.block("T_concat"):
ax = T.axis.spatial(24, i)
T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
@T.prim_func
def floordiv_and_floormod_indices(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [16, 16])
Y = T.match_buffer(b, [256])
temp = T.alloc_buffer([16, 16])
for i, j in T.grid(16, 16):
with T.block("A"):
v_i, v_j = T.axis.remap("SS", [i, j])
temp[v_i, v_j] = X[v_j, v_i] + 1.0
for i in T.serial(0, 256):
with T.block("B"):
v_i = T.axis.remap("S", [i])
Y[v_i] = temp[v_i // 16, v_i % 16]
@T.prim_func
def floordiv_and_floormod_indices_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [16, 16], dtype="float32")
Y = T.match_buffer(b, [256], dtype="float32")
temp = T.alloc_buffer([16, 16], dtype="float32")
for i in T.serial(0, 16):
for j in T.serial(0, 16):
with T.block("A"):
v_i, v_j = T.axis.remap("SS", [i, j])
temp[v_i, v_j] = X[v_j, v_i] + T.float32(1)
for ax0 in T.serial(0, 16):
with T.block("B"):
v_i = T.axis.spatial(256, i * 16 + ax0)
Y[v_i] = temp[v_i // 16, v_i % 16]
@T.prim_func
def tiled_repeat_op(x: T.Buffer[(4,), "float32"], T_repeat: T.Buffer[(64,), "float32"]) -> None:
T_add = T.alloc_buffer([4], dtype="float32")
for i0 in T.serial(4):
with T.block("T_add"):
ax0 = T.axis.spatial(4, i0)
T_add[ax0] = x[ax0] + 1.0
for i0_0, i0_1 in T.grid(8, 8):
with T.block("T_repeat"):
ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
T_repeat[ax0] = T_add[ax0 // 16]
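# Expected IR after computing T_add at the outer i0_0 loop: within one tile of
# 8 output elements, ax0 // 16 is the constant i0_0 // 2, so the cached T_add
# region collapses to a single element per tile.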
@T.prim_func
def tiled_repeat_op_after_compute_at(x: T.Buffer[(4,), "float32"], T_repeat: T.Buffer[(64,), "float32"]) -> None:
T_add = T.alloc_buffer([4], dtype="float32")
for i0_0 in T.serial(8):
with T.block("T_add"):
ax0 = T.axis.spatial(4, i0_0 // 2)
T_add[ax0] = x[ax0] + T.float32(1)
for i0_1 in T.serial(8):
with T.block("T_repeat"):
ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
T_repeat[ax0] = T_add[ax0 // 16]
@T.prim_func
def static_bound(A: T.Buffer[(32, 1), "float32"], C: T.Buffer[(32, 1), "float32"]) -> None:
B = T.alloc_buffer((32, 1), "float32")
for i, j in T.grid(32, 1):
with T.block("B"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(32, 32):
with T.block("C"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
T.where(j < 1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def static_bound_after_compute_at(A: T.Buffer[(32, 1), "float32"], C: T.Buffer[(32, 1), "float32"]) -> None:
B = T.alloc_buffer((32, 1), "float32")
for i in range(32):
for ax0, ax1 in T.grid(1, 1):
with T.block("B"):
vi = T.axis.spatial(32, i + ax0)
vj = T.axis.spatial(1, ax1)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(32):
with T.block("C"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
T.where(j < 1)
C[vi, vj] = B[vi, vj] + 1.0
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# fmt: on
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_compute_at_two_elementwise(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops("C" if use_block_name else sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(two_elementwise_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
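# A minimal sketch (not part of the original suite) of the pattern each test in
# this file follows: build a traced schedule, apply a primitive, and inspect or
# compare the result. The helper name `_demo_compute_at_pattern` is ours; the
# leading underscore keeps pytest from collecting it as a test.
def _demo_compute_at_pattern():
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    block = sch.get_block("B")
    loop, _ = sch.get_loops(sch.get_block("C"))
    sch.compute_at(block, loop, preserve_unit_loops=True)
    # Print the transformed IRModule as TVMScript for manual inspection.
    print(sch.mod.script())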
def test_compute_at_blockized_1(use_block_name):
sch = tir.Schedule(blockized_1, debug_mask="all")
block = sch.get_block("B")
_, loop = sch.get_loops(sch.get_block("C_outer"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=blockized_1)
def test_compute_at_blockized_2(use_block_name):
sch = tir.Schedule(blockized_2, debug_mask="all")
block = sch.get_block("B_outer")
_, loop, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_2_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=blockized_2)
def test_compute_at_cuda_matmul_0(use_block_name):
sch = tir.Schedule(cuda_matmul_0, debug_mask="all")
block = sch.get_block("C")
_, _, _, _, _, loop, _, _ = sch.get_loops(sch.get_block("C_local"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_0_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_0)
def test_compute_at_cuda_matmul_1(use_block_name):
sch = tir.Schedule(cuda_matmul_1, debug_mask="all")
block = sch.get_block("A_shared_local")
_, _, _, _, _, _, _, loop, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_2, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_1)
def test_compute_at_cuda_matmul_2(use_block_name):
sch = tir.Schedule(cuda_matmul_2, debug_mask="all")
block = sch.get_block("B_shared_local")
_, _, _, _, _, _, _, loop, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_3, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_2)
def test_compute_at_cuda_matmul_3(use_block_name):
sch = tir.Schedule(cuda_matmul_3, debug_mask="all")
block = sch.get_block("A_shared")
_, _, _, _, _, _, loop, _, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_4, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_3)
def test_compute_at_cuda_matmul_4(use_block_name):
sch = tir.Schedule(cuda_matmul_4, debug_mask="all")
block = sch.get_block("B_shared")
_, _, _, _, _, _, loop, _, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_5, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_4)
def test_compute_at_reduction_block(use_block_name):
sch = tir.Schedule(multi_reduction, debug_mask="all")
block = sch.get_block("B")
(loop,) = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(multi_reduction_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=multi_reduction)
def test_compute_at_tiled_pooling_read_cache(use_block_name):
sch = tir.Schedule(tiled_pooling_read_cache, debug_mask="all")
compute = sch.get_block("compute")
_, w_o, _, _, _, _ = sch.get_loops(compute)
cache = sch.get_block("cache")
sch.compute_at(cache, w_o)
tvm.ir.assert_structural_equal(tiled_pooling_read_cache_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled_pooling_read_cache)
def test_compute_at_non_uniform_tiled_conv(use_block_name):
sch = tir.Schedule(non_uniform_tiled_conv, debug_mask="all")
compute = sch.get_block("compute")
sch.compute_at(sch.get_block("cache"), sch.get_loops(compute)[1])
tvm.ir.assert_structural_equal(non_uniform_tiled_conv_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=non_uniform_tiled_conv)
def test_compute_at_concat(use_block_name):
sch = tir.Schedule(concat_two_elemwise, debug_mask="all")
concat = sch.get_block("T_concat")
add1 = sch.get_block("T_add_1")
add2 = sch.get_block("T_add_2")
axis = sch.get_loops(concat)[0]
sch.compute_at(add1, axis)
sch.compute_at(add2, axis)
tvm.ir.assert_structural_equal(concat_two_elemwise_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=concat_two_elemwise)
def test_compute_at_tiled_repeat_op(use_block_name):
sch = tir.Schedule(tiled_repeat_op, debug_mask="all")
outer_ax, _ = sch.get_loops(sch.get_block("T_repeat"))
sch.compute_at(sch.get_block("T_add"), outer_ax)
tvm.ir.assert_structural_equal(tiled_repeat_op_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled_repeat_op)
def test_reverse_compute_at_tiled(use_block_name):
sch = tir.Schedule(tiled, debug_mask="all")
block = sch.get_block("C")
_, _, loop, _ = sch.get_loops(sch.get_block("B"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(tiled_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled)
def test_reverse_compute_at_tiled_trivial_binding(use_block_name):
sch = tir.Schedule(tiled_trivial_binding, debug_mask="all")
block = sch.get_block("C")
_, _, loop, _ = sch.get_loops(sch.get_block("B"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(tiled_trivial_binding_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled_trivial_binding)
def test_reverse_compute_at_blockized_2(use_block_name):
sch = tir.Schedule(blockized_2, debug_mask="all")
block = sch.get_block("C")
_, loop = sch.get_loops(sch.get_block("B_outer"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_2_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=blockized_2)
def test_reverse_compute_at_factorized(use_block_name):
sch = tir.Schedule(factorized, debug_mask="all")
block = sch.get_block("B")
_, loop, _, _ = sch.get_loops(sch.get_block("B_rf"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(factorized_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=factorized)
def test_reverse_compute_at_floordiv_and_floormod_indices(use_block_name):
sch = tir.Schedule(floordiv_and_floormod_indices, debug_mask="all")
A = sch.get_block("A")
B = sch.get_block("B")
sch.reverse_compute_at(B, sch.get_loops(A)[0])
tvm.ir.assert_structural_equal(
floordiv_and_floormod_indices_after_reverse_compute_at, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=floordiv_and_floormod_indices)
def test_read_out_of_bound(use_block_name):
sch = tir.Schedule(read_out_of_bound, debug_mask="all")
block = sch.get_block("B")
(loop,) = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop)
tvm.ir.assert_structural_equal(read_out_of_bound_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=read_out_of_bound)
def test_compact_dataflow(use_block_name):
sch = tir.Schedule(not_all_compact_data_flow, debug_mask="all")
block = sch.get_block("B")
_, loop = sch.get_loops(sch.get_block("C_1"))
sch.compute_at(block, loop)
tvm.ir.assert_structural_equal(not_all_compact_data_flow_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=not_all_compact_data_flow)
def test_compute_at_simplify_static_bound(use_block_name):
sch = tir.Schedule(static_bound, debug_mask="all")
block = sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(static_bound_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=static_bound)
def test_compute_at_non_perfect_channel_group(use_block_name):
@T.prim_func
def grouped_channel_bias(
X: T.Buffer[(720, 8, 8), "float32"], Y: T.Buffer[(720, 8, 8), "float32"]
):
        B = T.alloc_buffer([45], dtype="float32")
for i in T.grid(45):
with T.block("init"):
vi = T.axis.remap("S", [i])
B[vi] = vi
for c_o, h, w, c_i in T.grid(2, 8, 8, 360):
with T.block("compute"):
hh, ww = T.axis.remap("SS", [h, w])
cc = T.axis.spatial(720, c_o * 360 + c_i)
Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
@T.prim_func
def grouped_channel_bias_non_perfect_tiled(
X: T.Buffer[(720, 8, 8), "float32"], Y: T.Buffer[(720, 8, 8), "float32"]
):
B = T.alloc_buffer([45], dtype="float32")
for c_o in range(2):
for ax0 in range(23):
with T.block("init"):
vi = T.axis.spatial(45, c_o * 22 + ax0)
B[vi] = vi
for h, w, c_i in T.grid(8, 8, 360):
with T.block("compute"):
hh, ww = T.axis.remap("SS", [h, w])
cc = T.axis.spatial(720, c_o * 360 + c_i)
Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
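    # Non-perfect case: the 45 bias groups do not divide evenly across the two
    # channel halves, so each half touches 23 groups at offset c_o * 22 and the
    # cached tiles overlap by one element.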
sch = tir.Schedule(grouped_channel_bias, debug_mask="all")
loop = sch.get_loops(sch.get_block("compute"))[0]
sch.compute_at(sch.get_block("init"), loop)
tvm.ir.assert_structural_equal(sch.mod["main"], grouped_channel_bias_non_perfect_tiled)
def test_fail_subtree_complete_block(use_block_name):
sch = tir.Schedule(fail_subtree_compact_dataflow, debug_mask="all")
block = sch.get_block("B_0")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="complete block"):
sch.compute_at(block, loop)
def test_fail_not_in_same_scope(use_block_name):
sch = tir.Schedule(blockized_1, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C_inner"))
with pytest.raises(tvm.tir.ScheduleError, match="same block scope"):
sch.compute_at(block, loop)
def test_fail_loop_is_ancestor_of_block(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError, match="ancestor of block"):
sch.compute_at(block, loop)
def test_fail_output_block(use_block_name):
sch = tir.Schedule(tiled, debug_mask="all")
block = "C" if use_block_name else sch.get_block("C")
loop, _, _, _ = sch.get_loops(sch.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError, match="output block"):
sch.compute_at(block, loop)
def test_fail_all_consumers_under_loop(use_block_name):
sch = tir.Schedule(fail_all_consumers_under_loop, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="requires all the consumer"):
sch.compute_at(block, loop)
def test_fail_all_producers_under_loop(use_block_name):
sch = tir.Schedule(fail_all_producers_under_loop, debug_mask="all")
block = "D" if use_block_name else sch.get_block("D")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="requires all the producer"):
sch.reverse_compute_at(block, loop)
def test_compute_at_int64_loop(use_block_name):
def _create_prim_func():
n = te.var("n", dtype="int64")
m = te.var("m", dtype="int64")
A = te.placeholder((n, m), name="A", dtype="float32")
B = te.placeholder((n, m), name="B", dtype="float32")
C = te.compute((n, m), lambda i, j: A[i, j] + B[i, j], name="C")
D = te.compute((n, m), lambda i, j: C[i, j] + 1.0, name="D")
return te.create_prim_func([A, B, D])
mod = _create_prim_func()
sch = tir.Schedule(mod, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
block_d = "D" if use_block_name else sch.get_block("D")
i, _ = sch.get_loops(block_d)
sch.compute_at(block_c, i)
verify_trace_roundtrip(sch=sch, mod=mod)
def test_compute_at_to_index():
@T.prim_func
def multi_producers_conv(
data: T.Buffer[(1, 3, 224, 224), "int8"],
w: T.Buffer[(16, 3, 7, 7), "int8"],
conv: T.Buffer[(1, 16, 112, 112), "int32"],
) -> None:
pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 3, 230, 230):
with T.block("pad"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
T.writes(pad[i0_1, i1_1, i2_1, i3_1])
pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
T.int8(0),
dtype="int8",
)
for i0 in T.serial(1):
for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
with T.block("wbuf"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(w[v0, v1, v2, v3])
T.writes(wbuf[v0, v1, v2, v3])
wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
with T.block("conv"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
"SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
)
T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
T.writes(conv[nn, ff, yy, xx])
with T.init():
conv[nn, ff, yy, xx] = 0
conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
) * T.cast(wbuf[ff, rc, ry, rx], "int32")
@T.prim_func
def multi_producers_after_compute_at(
data: T.Buffer[(1, 3, 224, 224), "int8"],
w: T.Buffer[(16, 3, 7, 7), "int8"],
conv: T.Buffer[(1, 16, 112, 112), "int32"],
) -> None:
pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
for i0 in T.serial(1):
for ax0, ax1, ax2 in T.grid(3, 229, 229):
with T.block("pad"):
i0_1 = T.axis.spatial(1, 0)
i1_1 = T.axis.spatial(3, ax0)
i2_1 = T.axis.spatial(230, ax1)
i3_1 = T.axis.spatial(230, ax2)
T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
T.writes(pad[i0_1, i1_1, i2_1, i3_1])
pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
T.int8(0),
dtype="int8",
)
for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
with T.block("wbuf"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(w[v0, v1, v2, v3])
T.writes(wbuf[v0, v1, v2, v3])
wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
with T.block("conv"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
"SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
)
T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
T.writes(conv[nn, ff, yy, xx])
with T.init():
conv[nn, ff, yy, xx] = 0
conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
) * T.cast(wbuf[ff, rc, ry, rx], "int32")
sch = tir.Schedule(multi_producers_conv, debug_mask="all")
block_c = sch.get_block("pad")
axis = sch.get_loops("conv")[0]
sch.compute_at(block_c, axis, index=-2)
tvm.ir.assert_structural_equal(multi_producers_after_compute_at, sch.mod["main"])
def test_reverse_compute_at_to_index():
@T.prim_func
def main(A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
C = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
for ax0 in T.serial(16):
with T.block("C"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def main_reverse_compute_at(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
C = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
for ax0 in T.serial(16):
with T.block("D"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = B[vi, vj] + T.float32(1)
for ax0 in T.serial(16):
with T.block("C"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
sch = tir.Schedule(main, debug_mask="all")
block_c = sch.get_block("D")
axis = sch.get_loops("B")[2]
sch.reverse_compute_at(block_c, axis, index=1)
tvm.ir.assert_structural_equal(main_reverse_compute_at, sch.mod["main"])
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_compute_inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_multi_producer_consumer(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0 # B has two consumers
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj] # D has two producers
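# Expected IR after `compute_inline(block="B")` on the function above: the body
# of "B" (A * 2.0) is substituted into both consumers "C" and "D", and the
# intermediate buffer B disappears.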
@T.prim_func
def elementwise_multi_consumer_inlined(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj]
@T.prim_func
def elementwise_standalone(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def elementwise_standalone_dce(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def elementwise_under_loop(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def fail_multi_reader_writer(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.alloc_buffer((128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
C[vi, vj] = A[vi, vj] + 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + C[vi, vj]
@T.prim_func
def elementwise_multi_reverse_loads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0
@T.prim_func
def elementwise_multi_reverse_loads_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0
@T.prim_func
def elementwise_reverse_affine_load(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 32, 8, 8), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k, l in T.grid(8, 32, 8, 8):
with T.block("C"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
C[vi, vj, vk, vl] = B[
((((vi * 32) + vj) * 8 + vk) * 8 + vl) // 128,
((((vi * 32) + vj) * 8 + vk) * 8 + vl) % 128,
]
@T.prim_func
def elementwise_reverse_affine_load_inlined(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 32, 8, 8), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[
(vj + vi * 128) // 2048,
(vj + vi * 128) // 64 % 32,
((vj + vi * 128) // 8) % 8,
(vj + vi * 128) % 8,
] = (
A[vi, vj] * 2.0
)
@T.prim_func
def elementwise_reverse_affine_load_unit_iter(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
C = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
for i, j, k, l in T.grid(1, 8, 16, 128):
with T.block("C"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
D[vi, vj, vk, vl] = C[vj * 16 + vk, vl] + B[vj, vk, vi]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_inlined(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_simplified(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
C = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
D[0, vi, vj, vk] = C[vi * 16 + vj, vk] + B[vi, vj, 0]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_simplified_inlined(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0]
@T.prim_func
def elementwise_reverse_affine_chain(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(1, 8, 16, 128), "float32"]
):
B = T.alloc_buffer((128, 128))
C = T.alloc_buffer((8, 16, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vk] + 1.0
for i, j, k, l in T.grid(1, 8, 16, 128):
with T.block("D"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
D[vi, vj, vk, vl] = C[vj, vk, vl]
@T.prim_func
def elementwise_reverse_affine_chain_inlined(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(1, 8, 16, 128), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_multi_reverse_affine_load(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(8, 16, 128), "float32"],
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vk] + B[vi * 16 + vj, vk]
@T.prim_func
def elementwise_multi_reverse_affine_load_inlined(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + A[vi, vj] * 2.0
@T.prim_func
def elementwise_reverse_non_affine_load(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 16, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vi * 16 + vj]
@T.prim_func
def opaque_access_load(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[0:128, 0:128])
T.writes(C[0:128, 0:128])
T.evaluate(B.access_ptr("r", extent=128))
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def opaque_access_store(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[0:128, 0:128])
T.writes(C[0:128, 0:128])
T.evaluate(B.access_ptr("r", extent=128))
T.evaluate(C.access_ptr("w", extent=128))
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def buffer_matched(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
Bb = T.match_buffer(B[vi : vi + 1, vj], (1, 1))
C[vi, vj] = Bb[0, 0] + 1.0
@T.prim_func
def elementwise_predicate(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(B[i, j] < 10.0)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_predicate_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(A[i, j] * 2.0 < 10.0)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_multi_loads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2]
@T.prim_func
def elementwise_multi_loads_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0
@T.prim_func
def access_opaque_ptr_then_elemwise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [1024])
B = T.match_buffer(b, [1024])
A_cache = T.alloc_buffer([1024])
BB = T.alloc_buffer([1024])
with T.block("opaque"):
# annotated opaque partial access
T.reads(A[0:512])
T.writes(A_cache[0:512])
T.evaluate(A.access_ptr("r", extent=512))
T.evaluate(A_cache.access_ptr("w", extent=512))
for i in range(512):
with T.block("BB"):
vi = T.axis.remap("S", [i])
BB[vi] = A_cache[vi] * 2.0
for i in range(512):
with T.block("B"):
vi = T.axis.remap("S", [i])
B[vi] = BB[vi] + 1.0
@T.prim_func
def access_opaque_ptr_then_elemwise_inline(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [1024], dtype="float32")
B = T.match_buffer(b, [1024], dtype="float32")
A_cache = T.alloc_buffer([1024], dtype="float32")
with T.block("opaque"):
# annotated opaque partial access should be kept
T.reads(A[0:512])
T.writes([A_cache[0:512]])
T.evaluate(A.access_ptr("r", extent=512))
T.evaluate(A_cache.access_ptr("w", extent=512))
for i in T.serial(0, 512):
with T.block("B"):
vi = T.axis.spatial(512, i)
T.reads([A_cache[vi]])
T.writes([B[vi]])
B[vi] = A_cache[vi] * 2.0 + 1.0
@T.prim_func
def matmul_relu(var_A: T.handle, var_B: T.handle, var_compute: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
compute = T.match_buffer(var_compute, [512, 512], dtype="float32")
C = T.alloc_buffer([512, 512], dtype="float32")
for i0, i1, i2 in T.grid(512, 512, 512):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads([C[i, j], A[i, k], B[k, j]])
T.writes([C[i, j]])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
for i0, i1 in T.grid(512, 512):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads([C[i0_1, i1_1]])
T.writes([compute[i0_1, i1_1]])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
@T.prim_func
def inline_block_with_init(
A: T.Buffer[(1, 512, 7, 7), "float32"],
B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
for i0, i1, i2, i3, i4, i5 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("tensor_rf"):
vi4 = T.axis.spatial(49, i4)
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1)
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
with T.init():
B_rf[ax0, ax1, ax2, ax3, vi4] = T.float32(0)
B_rf[ax0, ax1, ax2, ax3, vi4] = (
B_rf[ax0, ax1, ax2, ax3, vi4]
+ A[
ax0,
ax1,
ax2 * 7 + vi4 // 7,
ax3 * 7 + vi4 % 7,
]
)
for i0, i1 in T.grid(1, 512):
for ax0, ax1, ax2, ax3, ax4 in T.grid(49, 1, 1, 1, 1):
with T.block("tensor"):
vi4, ax0_1 = T.axis.remap("RS", [ax0, ax1])
ax1_1 = T.axis.spatial(512, i1 + ax2)
ax2_1, ax3_1 = T.axis.remap("SS", [ax3, ax4])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
B[ax0_1, ax1_1, ax2_1, ax3_1] = (
B[ax0_1, ax1_1, ax2_1, ax3_1] + B_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4]
)
@T.prim_func
def exp_exp_opaque_access_with_tvm_access_ptr(
lookup_table: T.Buffer[(1024,), "int8"],
x: T.Buffer[(16,), "float16"],
compute: T.Buffer[(16,), "float16"],
) -> None:
compute_1 = T.alloc_buffer([16], dtype="float16")
for i0 in T.serial(16):
with T.block("compute"):
i0_1 = T.axis.spatial(16, i0)
T.reads(x[i0_1])
T.writes(compute_1[i0_1])
compute_1[i0_1] = T.exp(x[i0_1], dtype="float16")
for i0 in T.serial(16):
with T.block("compute_1"):
i0_2 = T.axis.spatial(16, i0)
T.reads(lookup_table[0:1024], compute_1[i0_2])
T.writes(compute[i0_2])
T.evaluate(lookup_table.access_ptr("r"))
compute[i0_2] = T.exp(
compute_1[i0_2],
dtype="float16",
)
@T.prim_func
def exp_exp_opaque_access_with_tvm_access_ptr_inlined(
lookup_table: T.Buffer[(1024,), "int8"],
x: T.Buffer[(16,), "float16"],
compute: T.Buffer[(16,), "float16"],
) -> None:
for i0 in T.serial(16):
with T.block("compute_1"):
i0_1 = T.axis.spatial(16, i0)
            # The opaque access is not added to the new write region when it is
            # wrapped with a tvm_access_ptr whose access mask is "read only"
T.reads(lookup_table[0:1024], x[i0_1])
T.writes(compute[i0_1])
T.evaluate(lookup_table.access_ptr("r"))
compute[i0_1] = T.exp(
T.exp(x[i0_1], dtype="float16"),
dtype="float16",
)
@T.prim_func
def elementwise_overcomputed_producer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(127, 127), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(127, 127):
with T.block("C"):
cvi, cvj = T.axis.remap("SS", [i, j])
C[cvi, cvj] = B[cvi, cvj] + 1.0
@T.prim_func
def elementwise_overcomputed_producer_reverse_inlined(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(127, 127), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i < 127 and j < 127)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_producer_not_cover_consumer(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(256, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(256, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.if_then_else(vi >= 128, B[vi - 128, vj], T.float32(0), dtype="float32")
@T.prim_func
def elementwise_predicate_producer(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((127, 128))
C = T.match_buffer(c, (127, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i < 127)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(127, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_predicate_producer_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (127, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.where(i < 127)
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = A[vi, vj] * T.float32(2) + T.float32(1)
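# Tensorized int8 conv2d module used by the "predicate disallowed" test below:
# the producer of "compute_4" is guarded by a T.where predicate that
# reverse_compute_inline cannot prove.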
# fmt: off
@tvm.script.ir_module
class Conv2dInt8_TensorCore_with_predicate:
@T.prim_func
def main(p0: T.Buffer[(16, 56, 56, 64), "int8"], p1: T.Buffer[(256, 1, 1, 64), "int8"], p2: T.Buffer[(1, 1, 1, 256), "int32"], p3: T.Buffer[(1, 1, 1, 256), "int32"], p4: T.Buffer[256, "int32"], p5: T.Buffer[256, "int32"], p6: T.Buffer[256, "int32"], p7: T.Buffer[(), "int32"], p8: T.Buffer[1, "int32"], p9: T.Buffer[(16, 56, 56, 256), "int32"], compute: T.Buffer[(16, 56, 56, 256), "int32"]):
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared")
conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator")
pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared")
p1_reindex_shared = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="shared")
pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a")
p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b")
for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"):
for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"):
for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"):
for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2):
for ax0_ax1_fused in T.serial(1024):
with T.block("pad_temp_reindex_shared"):
v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0_ax1_fused // 32)
v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32)
T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
T.writes(pad_temp_reindex_shared[v0, v1])
T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]], "meta_schedule.cooperative_fetch":4})
pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("p1_reindex_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused // 32)
v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(p1[v2, v0, v1, v3])
T.writes(p1_reindex_shared[v0, v1, v2, v3])
T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]], "meta_schedule.cooperative_fetch":3})
p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3]
for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 2):
for ax0_0_1, ax1_0_1 in T.grid(1, 1):
with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2)
v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_a"})
for ax0_1_1, ax1_1_1 in T.grid(16, 16):
with T.block("pad_temp_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1])
T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1):
with T.block("p1_reindex_shared_wmma.matrix_b_o"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0)
v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.reads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_b_trans"})
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("p1_reindex_shared_wmma.matrix_b"):
v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]
for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2):
with T.block("conv2d_nhwc_o"):
v0 = T.axis.reduce(1, 0)
v1 = T.axis.reduce(1, 0)
v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4)
v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4)
v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2)
T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
with T.init():
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_init"):
v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads()
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init])
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0
for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16):
with T.block("conv2d_nhwc"):
v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.cast(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], "int32") * T.cast(p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i], "int32")
for ax0_0, ax1_0 in T.grid(1, 2):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2)
v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0)
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_s32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2):
with T.block("conv2d_nhwc_reindex_shared"):
T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64)
v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0)
v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + ax1_1 * 64 + ax1_2 * 2 + ax1_3))
T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0])
T.writes(compute_3[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
compute_3[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] = T.q_multiply_shift(T.max(T.min(p7[()] + T.q_multiply_shift_per_axis(conv2d_nhwc_reindex_shared[v0, v1] - p2[0, 0, 0, v1] + p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], 31, False, True, dtype="int32"), 255), 0) - p8[0], 1457846997, 31, 0, dtype="int32")
for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256):
with T.block("compute_4"):
i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12])
T.reads(compute_3[i0_13, i1_13, i2_13, i3_13], p9[i0_13, i1_13, i2_13, i3_13])
T.writes(compute[i0_13, i1_13, i2_13, i3_13])
compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(compute_3[i0_13, i1_13, i2_13, i3_13] + T.q_multiply_shift(p9[i0_13, i1_13, i2_13, i3_13], 2101000910, 31, 0, dtype="int32"), 255), 0)
# fmt: on
# pylint: enable=no-member,invalid-name,unused-variable
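# Run each test twice: once addressing blocks by BlockRV handle, once by name.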
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_compute_inline_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_compute_inline_under_loop(use_block_name):
sch = tir.Schedule(elementwise_under_loop, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop)
def test_compute_inline_as_dce(use_block_name):
sch = tir.Schedule(elementwise_standalone, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
verify_trace_roundtrip(sch=sch, mod=elementwise_standalone)
def test_compute_inline_multi_consumer(use_block_name):
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_producer_consumer)
def test_compute_inline_fail_multi_writer(use_block_name):
sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reverse_compute_inline_under_loop(use_block_name):
sch = tir.Schedule(elementwise_under_loop, debug_mask="all")
block_b = sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop)
def test_reverse_compute_inline_fail_as_dce(use_block_name):
sch = tir.Schedule(elementwise_standalone, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer(use_block_name):
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all")
block_d = "D" if use_block_name else sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader(use_block_name):
sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_loads)
def test_reverse_compute_inline_affine_load(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_reverse_affine_load_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load)
def test_reverse_compute_inline_multi_affine_load(use_block_name):
sch = tir.Schedule(elementwise_multi_reverse_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_affine_load_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_affine_load)
def test_reverse_compute_inline_affine_load_unit_iter(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(
elementwise_reverse_affine_load_unit_iter_inlined, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter)
def test_reverse_compute_inline_affine_load_unit_iter_simplified(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter_simplified, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(
elementwise_reverse_affine_load_unit_iter_simplified_inlined, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter_simplified)
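# Both inline orders (C then D, or D then C) must converge to the same IR.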
@pytest.mark.parametrize("reverse_order", [True, False])
def test_reverse_compute_inline_affine_chain(use_block_name, reverse_order):
sch = tir.Schedule(elementwise_reverse_affine_chain, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
block_d = "D" if use_block_name else sch.get_block("D")
if reverse_order:
sch.reverse_compute_inline(block_d)
sch.reverse_compute_inline(block_c)
else:
sch.reverse_compute_inline(block_c)
sch.reverse_compute_inline(block_d)
tvm.ir.assert_structural_equal(elementwise_reverse_affine_chain_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_chain)
def test_reverse_compute_fail_non_affine_load(use_block_name):
sch = tir.Schedule(elementwise_reverse_non_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_fail_multi_reverse_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_loads, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_opaque_access_load(use_block_name):
sch = tir.Schedule(opaque_access_load, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store(use_block_name):
sch = tir.Schedule(opaque_access_store, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched(use_block_name):
sch = tir.Schedule(buffer_matched, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_output_block(use_block_name):
sch = tir.Schedule(matmul_relu, debug_mask="all")
block = sch.get_block("compute")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block)
def test_compute_inline_predicate(use_block_name):
sch = tir.Schedule(elementwise_predicate, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_predicate)
def test_compute_inline_multi_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_loads, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_loads)
def test_compute_inline_with_opaque_access(use_block_name):
"""Test not rewrite opaque reads/writes after irrelavant compute inline"""
sch = tir.Schedule(access_opaque_ptr_then_elemwise, debug_mask="all")
BB = "BB" if use_block_name else sch.get_block("BB")
sch.compute_inline(BB)
tvm.ir.assert_structural_equal(access_opaque_ptr_then_elemwise_inline, sch.mod["main"])
def test_inline_block_with_init():
sch = tir.Schedule(inline_block_with_init, debug_mask="all")
block = sch.get_block(name="tensor_rf", func_name="main")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block=block)
def test_compute_inline_opaque_access_with_tvm_access_ptr(use_block_name):
"""Test opaque access with tvm_access_ptr after compute inline"""
sch = tir.Schedule(exp_exp_opaque_access_with_tvm_access_ptr, debug_mask="all")
compute = "compute" if use_block_name else sch.get_block("compute")
sch.compute_inline(compute)
tvm.ir.assert_structural_equal(
exp_exp_opaque_access_with_tvm_access_ptr_inlined, sch.mod["main"]
)
def test_reverse_compute_inline_overcomputed_producer(use_block_name):
"""Test reverse compute inline overcomputed producer"""
sch = tir.Schedule(elementwise_overcomputed_producer, debug_mask="all")
compute = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(compute)
tvm.ir.assert_structural_equal(
elementwise_overcomputed_producer_reverse_inlined, sch.mod["main"]
)
def test_reverse_compute_inline_error_producer_not_cover_consumer(use_block_name):
"""Test reverse compute inline failure when the inlined block iter domains are not covered by
its producer
"""
sch = tir.Schedule(elementwise_producer_not_cover_consumer, debug_mask="all")
compute = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(compute)
def test_reverse_compute_inline_producer_predicate_allowed():
"""Test a case where reverse compute inline is allowed even though the producer has a
non-trivial predicate.
"""
sch = tir.Schedule(elementwise_predicate_producer, debug_mask="all")
sch.reverse_compute_inline(sch.get_block("C"))
tvm.ir.assert_structural_equal(elementwise_predicate_producer_inlined, sch.mod["main"])
def test_reverse_compute_inline_producer_predicate_disallowed():
"""Test reverse compute inline failure when the producer has a non-trivial predicate that cannot be
implied by the synthesized predicate of the new inlined block.
"""
sch = tir.Schedule(Conv2dInt8_TensorCore_with_predicate, debug_mask="all")
with pytest.raises(tvm.tir.ScheduleError) as e:
sch.reverse_compute_inline(sch.get_block("compute_4"))
assert (
"that cannot be implied by the synthesized predicate True of the new inlined block"
in str(e)
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_decompose_padding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import numpy as np
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def check_decompose_padding(origin, scheduled, expected, check_run=False):
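    """Assert that `scheduled` is structurally equal to `expected`; if `check_run`
    is set, also build and run the original and scheduled funcs on random data
    and compare their outputs."""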
tvm.ir.assert_structural_equal(scheduled, expected)
if check_run:
in_buffer = origin.buffer_map[origin.params[0]]
out_buffer = origin.buffer_map[origin.params[1]]
in_shape = [int(_) for _ in in_buffer.shape]
out_shape = [int(_) for _ in out_buffer.shape]
x = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buffer.dtype))
y0 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
y1 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
f_origin = tvm.build(origin)
f_scheduled = tvm.build(scheduled)
f_origin(x, y0)
f_scheduled(x, y1)
tvm.testing.assert_allclose(y0.numpy(), y1.numpy())
def test_1d_decompose_padding():
@T.prim_func
def before_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in range(140):
with T.block("block"):
vi = T.axis.remap("S", [i])
y[vi] = T.if_then_else(vi >= 6 and vi < 134, x[vi - 6], 0, dtype="int32")
@T.prim_func
def after_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in T.serial(140):
with T.block("block_pad_const"):
vi = T.axis.spatial(140, i)
T.reads()
T.writes(y[vi])
y[vi] = 0
for i in T.serial(128):
with T.block("block"):
vi = T.axis.spatial(128, i)
T.reads(x[vi])
T.writes(y[vi + 6])
y[vi + 6] = x[vi]
sch = tir.Schedule(before_decompose, debug_mask="all")
block = sch.get_block("block")
sch.decompose_padding(block, sch.get_loops(block)[0])
check_decompose_padding(before_decompose, sch.mod["main"], after_decompose, check_run=False)
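# Shared workload for the 2D cases below: 7x7 sum pooling over an input padded
# by 3 on each side of both spatial dimensions.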
@T.prim_func
def sum_pool_2d(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
):
pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 16, 231, 231):
with T.block("pad_temp"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[ax0, ax1, ax2, ax3] = T.if_then_else(
3 <= ax2 and ax2 < 228 and 3 <= ax3 and ax3 < 228,
x[ax0, ax1, ax2 - 3, ax3 - 3],
T.int8(0),
dtype="int8",
)
for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7):
with T.block("tensor"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
tensor[ax0, ax1, ax2, ax3] = T.int8(0)
tensor[ax0, ax1, ax2, ax3] = (
tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1]
)
def test_decompose_hw_padding_direct():
"""Case 0. direct decompose"""
@T.prim_func
def pooling_decompose_0(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
):
pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 16, 231, 231):
with T.block("pad_temp_pad_const"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[ax0, ax1, ax2, ax3] = T.int8(0)
for i0, i1, i2, i3 in T.grid(1, 16, 225, 225):
with T.block("pad_temp"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[ax0, ax1, ax2 + 3, ax3 + 3] = x[ax0, ax1, ax2, ax3]
for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7):
with T.block("tensor"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
tensor[ax0, ax1, ax2, ax3] = T.int8(0)
tensor[ax0, ax1, ax2, ax3] = (
tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1]
)
sch = tir.Schedule(sum_pool_2d, debug_mask="all")
pad = sch.get_block("pad_temp")
sch.decompose_padding(pad, sch.get_loops(pad)[0])
check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_0, check_run=True)
def test_decompose_hw_padding_tiled():
"""Case 1. tiling and then decompose"""
@T.prim_func
def pooling_decompose_1(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
) -> None:
pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
for i0, i2_0, i3_0 in T.grid(1, 3, 3):
for ax0, ax1, ax2 in T.grid(16, 81, 81):
with T.block("pad_temp_pad_const"):
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(16, ax0)
ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1)
ax3 = T.axis.spatial(231, i3_0 * 75 + ax2)
T.reads()
T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
for ax0, ax1, ax2 in T.grid(16, 81, 81):
with T.block("pad_temp"):
ax0_2 = T.axis.spatial(1, 0)
ax1_2 = T.axis.spatial(16, ax0)
ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3)
ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3)
T.where(
3 <= i2_0 * 75 + ax1
and i2_0 * 75 + ax1 < 228
and 3 <= i3_0 * 75 + ax2
and i3_0 * 75 + ax2 < 228
)
T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7):
with T.block("tensor"):
ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1)
ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
with T.init():
tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
tensor[ax0_3, ax1_3, ax2_3, ax3] = (
tensor[ax0_3, ax1_3, ax2_3, ax3]
+ pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
)
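    # Tile H and W into 3x75 tiles, move the padding producer under the W tile
    # loop, then decompose the padding at its first intra-tile loop so the
    # constant fill stays inside each tile.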
sch = tir.Schedule(sum_pool_2d, debug_mask="all")
block = sch.get_block("tensor")
pad = sch.get_block("pad_temp")
n, c, h, w, kh, kw = sch.get_loops(block)
ho, hi = sch.split(h, [3, 75])
wo, wi = sch.split(w, [3, 75])
sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
sch.compute_at(sch.get_block("pad_temp"), wo)
sch.decompose_padding(pad, sch.get_loops(pad)[3])
check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_1, check_run=True)
def test_decompose_hw_padding_tiled_and_lift_pad():
"""Case 2. tiling and then decompose, lift const pad values to outer loop"""
@T.prim_func
def pooling_decompose_2(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
) -> None:
pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
for i0, i2_0, i3_0, ax0, ax1, ax2 in T.grid(1, 3, 3, 16, 81, 81):
with T.block("pad_temp_pad_const"):
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(16, ax0)
ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1)
ax3 = T.axis.spatial(231, i3_0 * 75 + ax2)
T.reads()
T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
for i0, i2_0, i3_0 in T.grid(1, 3, 3):
for ax0, ax1, ax2 in T.grid(16, 81, 81):
with T.block("pad_temp"):
ax0_2 = T.axis.spatial(1, 0)
ax1_2 = T.axis.spatial(16, ax0)
ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3)
ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3)
T.where(
3 <= i2_0 * 75 + ax1
and i2_0 * 75 + ax1 < 228
and 3 <= i3_0 * 75 + ax2
and i3_0 * 75 + ax2 < 228
)
T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7):
with T.block("tensor"):
ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1)
ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
with T.init():
tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
tensor[ax0_3, ax1_3, ax2_3, ax3] = (
tensor[ax0_3, ax1_3, ax2_3, ax3]
+ pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
)
sch = tir.Schedule(sum_pool_2d, debug_mask="all")
block = sch.get_block("tensor")
pad = sch.get_block("pad_temp")
n, c, h, w, kh, kw = sch.get_loops(block)
ho, hi = sch.split(h, [3, 75])
wo, wi = sch.split(w, [3, 75])
sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
sch.compute_at(sch.get_block("pad_temp"), wo)
sch.decompose_padding(pad, sch.get_loops(pad)[0])
check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_2, check_run=True)
def test_decompose_hw_padding_non_perfect_tiled():
"""Case 3. non-perfect tiling and then decompose"""
@T.prim_func
def pooling_decompose_3(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
) -> None:
pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
for i0, i2_0, i3_0 in T.grid(1, 3, 3):
for ax0, ax1, ax2 in T.grid(16, 86, 86):
with T.block("pad_temp_pad_const"):
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(16, ax0)
ax2_1 = T.axis.spatial(231, i2_0 * 80 + ax1)
ax3 = T.axis.spatial(231, i3_0 * 80 + ax2)
T.where(i2_0 * 80 + ax1 < 231 and i3_0 * 80 + ax2 < 231)
T.reads()
T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
for ax0, ax1, ax2 in T.grid(16, 86, 86):
with T.block("pad_temp"):
ax0_2 = T.axis.spatial(1, 0)
ax1_2 = T.axis.spatial(16, ax0)
ax2_2 = T.axis.spatial(225, i2_0 * 80 + ax1 - 3)
ax3 = T.axis.spatial(225, i3_0 * 80 + ax2 - 3)
T.where(
3 <= i2_0 * 80 + ax1
and i2_0 * 80 + ax1 < 228
and 3 <= i3_0 * 80 + ax2
and i3_0 * 80 + ax2 < 228
and i2_0 * 80 + ax1 < 231
and i3_0 * 80 + ax2 < 231
)
T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
for i1, i2_1, i3_1, i4, i5 in T.grid(16, 80, 80, 7, 7):
with T.block("tensor"):
ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
ax2_3 = T.axis.spatial(225, i2_0 * 80 + i2_1)
ax3 = T.axis.spatial(225, i3_0 * 80 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.where(i2_0 * 80 + i2_1 < 225 and i3_0 * 80 + i3_1 < 225)
T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
with T.init():
tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
tensor[ax0_3, ax1_3, ax2_3, ax3] = (
tensor[ax0_3, ax1_3, ax2_3, ax3]
+ pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
)
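    # The 225-extent loops split as [None, 80] into 3 overcomputing tiles of 80,
    # so the expected function above needs extra T.where bound guards.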
sch = tir.Schedule(sum_pool_2d, debug_mask="all")
block = sch.get_block("tensor")
pad = sch.get_block("pad_temp")
n, c, h, w, kh, kw = sch.get_loops(block)
ho, hi = sch.split(h, [None, 80])
wo, wi = sch.split(w, [None, 80])
sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
sch.compute_at(sch.get_block("pad_temp"), wo)
sch.decompose_padding(pad, sch.get_loops(pad)[3])
check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_3, check_run=True)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable
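# Simple matmul with separate "init" and "update" blocks; the tests below only
# need a valid schedule target to exercise error rendering.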
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# pylint: enable=no-member,invalid-name,unused-variable
def test_tir_schedule_error_detail():
sch = tir.Schedule(matmul, debug_mask="all", error_render_level="detail")
with pytest.raises(tir.ScheduleError) as excinfo:
sch.get_block("wrong_name")
(msg,) = excinfo.value.args
assert "Cannot find a block with the name: wrong_name" in msg
def test_tir_schedule_error_fast():
sch = tir.Schedule(matmul, debug_mask="all", error_render_level="fast")
with pytest.raises(tir.ScheduleError) as excinfo:
sch.get_block("wrong_name")
(msg,) = excinfo.value.args
assert "Cannot find a block with the specified name" in msg
def test_tir_schedule_error_none():
sch = tir.Schedule(matmul, debug_mask="all", error_render_level="none")
with pytest.raises(tir.ScheduleError) as excinfo:
sch.get_block("wrong_name")
(msg,) = excinfo.value.args
assert "(not rendered)" in msg
def test_tir_schedule_attribute_error():
sch = tir.Schedule(matmul)
with pytest.raises(AttributeError):
sch.non_existent_field()
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_for_kind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
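# Before/after fixtures for loop-kind transforms: each "*_parallelized",
# "*_vectorized", "*_bound" or "*_unrolled" form is the expected result of the
# corresponding schedule primitive.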
@T.prim_func
def element_wise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_parallelized(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i0 in T.parallel(0, 128):
for i1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, i1])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_i_bound(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i0 in T.thread_binding(0, 128, thread="threadIdx.x"):
for i1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, i1])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_compute_at_split(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i in T.serial(0, 128):
for j0 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j0])
B[vi, vj] = A[vi, vj] * 2.0
for j1o, j1i in T.grid(32, 4):
with T.block("C"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j1o * 4 + j1i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_compute_at_split_vectorized(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i in T.serial(0, 128):
for j0 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j0])
B[vi, vj] = A[vi, vj] * 2.0
for j1o in T.serial(0, 32):
for j1i in T.vectorized(0, 4):
with T.block("C"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j1o * 4 + j1i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_split_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
for i, j_0, j_1 in T.grid(128, 13, 10):
with T.block("B"):
T.where(j_0 * 10 + j_1 < 128)
vi = T.axis.S(128, i)
vj = T.axis.S(128, j_0 * 10 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_split_predicate_parallelized(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
for i in T.serial(0, 128):
for j_0 in T.parallel(0, 13):
for j_1 in T.serial(0, 10):
with T.block("B"):
T.where(j_0 * 10 + j_1 < 128)
vi = T.axis.S(128, i)
vj = T.axis.S(128, j_0 * 10 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_split_predicate_vectorized(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
for i in T.vectorized(0, 128):
for j_0, j_1 in T.grid(13, 10):
with T.block("B"):
T.where(j_0 * 10 + j_1 < 128)
vi = T.axis.S(128, i)
vj = T.axis.S(128, j_0 * 10 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_compute_at_split_j0_j1o_bound(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i in T.serial(0, 128):
for j0 in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j0])
B[vi, vj] = A[vi, vj] * 2.0
for j1o in T.thread_binding(0, 32, thread="threadIdx.x"):
for j1i in T.serial(0, 4):
with T.block("C"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j1o * 4 + j1i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def rowsum(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_unrolled(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i0 in T.unroll(0, 128):
for i1 in T.serial(0, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i0, i1])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 16):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, T.floordiv(k * k, 2))
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_compact_data_flow(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vk] = 0.0
B[vk] = B[vk] + A[vi, vk]
@T.prim_func
def rowsum_cross_thread_reduction(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i0 in T.serial(0, 128):
for i1 in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i0, i1])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def opaque_block(a: T.handle) -> None:
A = T.match_buffer(a, (16,))
for i in T.serial(0, 15):
with T.block("opaque"):
A[i + 1] = A[i + 1] + A[i]
@T.prim_func
def block_inside_init(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
for i in T.serial(0, 128):
with T.block("outer"):
vi = T.axis.S(128, i)
with T.init():
for j in T.serial(0, 128):
with T.block("init"):
vj = T.axis.S(128, j)
B[vi, vj] = 0.0
for k in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("inner"):
vj, vk = T.axis.remap("SR", [j, k])
B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
@T.prim_func
def thread_bound_block_inside_init(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("outer"):
vi = T.axis.S(128, i)
with T.init():
for j in T.serial(0, 128):
with T.block("init"):
vj = T.axis.S(128, j)
B[vi, vj] = 0.0
for k in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("inner"):
vj, vk = T.axis.remap("SR", [j, k])
B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
@T.prim_func
def decomposed_gemm(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
):
local = T.alloc_buffer((16, 16), "float32")
for i, j in T.grid(4, 4):
for ii, jj in T.grid(4, 4):
with T.block("init"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
local[vi, vj] = 0
for k, ii, jj in T.grid(16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
local[vi, vj] += A[vi, vk] * B[vj, vk]
for ii, jj in T.grid(4, 4):
with T.block("C"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
C[vi, vj] = local[vi, vj]
@T.prim_func
def decomposed_gemm_after_vectorize(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
):
local = T.alloc_buffer((16, 16), "float32")
for i, j in T.grid(4, 4):
for ii, jj in T.grid(4, 4):
with T.block("init"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
local[vi, vj] = 0
for k, ii, jj in T.grid(16, 4, 4):
with T.block("update"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
vk = T.axis.R(16, k)
local[vi, vj] += A[vi, vk] * B[vj, vk]
for ii in range(4):
for jj in T.vectorized(4):
with T.block("C"):
vi = T.axis.S(16, i * 4 + ii)
vj = T.axis.S(16, j * 4 + jj)
C[vi, vj] = local[vi, vj]
@T.prim_func
def nested_block_bind(
A: T.Buffer[(16, 16, 16, 16), "float32"], B: T.Buffer[(16, 16, 16), "float32"]
):
for i, j in T.grid(16, 16):
with T.block("outer"):
vi, vj = T.axis.remap("SS", [i, j])
for k, l in T.grid(16, 16):
with T.block("inner"):
vk, vl = T.axis.remap("SR", [k, l])
with T.init():
B[vi, vj, vk] = 0.0
B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
@T.prim_func
def thread_bound_nested_block(
A: T.Buffer[(16, 16, 16, 16), "float32"], B: T.Buffer[(16, 16, 16), "float32"]
) -> None:
for i in T.serial(16):
for j in T.thread_binding(16, thread="blockIdx.x"):
with T.block("outer"):
vi, vj = T.axis.remap("SS", [i, j])
for k in T.serial(16):
for l in T.thread_binding(16, thread="threadIdx.x"):
with T.block("inner"):
vk, vl = T.axis.remap("SR", [k, l])
with T.init():
B[vi, vj, vk] = T.float32(0)
B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
@T.prim_func
def nested_block_bind_after_cache_read(
A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
for i in T.serial(16):
with T.block("outer"):
vi = T.axis.spatial(16, i)
A_shared = T.alloc_buffer([1, 16], dtype="float32", scope="shared")
for ax0, ax1 in T.grid(1, 16):
with T.block("A_shared"):
v0 = T.axis.spatial(16, vi + ax0)
v1 = T.axis.spatial(16, ax1)
A_shared[v0, v1] = A[v0, v1]
for j in T.serial(16):
with T.block("inner"):
vj = T.axis.reduce(16, j)
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A_shared[vi, vj]
@T.prim_func
def thread_bound_nested_block_after_cache_read(
A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
for i in T.thread_binding(16, thread="blockIdx.x"):
with T.block("outer"):
vi = T.axis.spatial(16, i)
A_shared = T.alloc_buffer([1, 16], dtype="float32", scope="shared")
for ax0, ax1 in T.grid(1, 16):
with T.block("A_shared"):
v0 = T.axis.spatial(16, vi + ax0)
v1 = T.axis.spatial(16, ax1)
A_shared[v0, v1] = A[v0, v1]
for j in T.thread_binding(16, thread="threadIdx.x"):
with T.block("inner"):
vj = T.axis.reduce(16, j)
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A_shared[vi, vj]
@T.prim_func
def decomposed_gemm_parallelize_init(
A: T.Buffer[(16, 16), "float32"],
B: T.Buffer[(16, 16), "float32"],
C: T.Buffer[(16, 16), "float32"],
) -> None:
local = T.alloc_buffer([16, 16], dtype="float32")
for i, j in T.grid(4, 4):
for ii in T.serial(4):
for jj in T.vectorized(4):
with T.block("init"):
vi = T.axis.spatial(16, i * 4 + ii)
vj = T.axis.spatial(16, j * 4 + jj)
T.reads()
T.writes(local[vi, vj])
local[vi, vj] = 0
for k, ii, jj in T.grid(16, 4, 4):
with T.block("update"):
vi = T.axis.spatial(16, i * 4 + ii)
vj = T.axis.spatial(16, j * 4 + jj)
vk = T.axis.reduce(16, k)
T.reads(local[vi, vj], A[vi, vk], B[vj, vk])
T.writes(local[vi, vj])
local[vi, vj] = local[vi, vj] + A[vi, vk] * B[vj, vk]
for ii, jj in T.grid(4, 4):
with T.block("C"):
vi = T.axis.spatial(16, i * 4 + ii)
vj = T.axis.spatial(16, j * 4 + jj)
T.reads(local[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = local[vi, vj]
@T.prim_func
def scatter_compute(A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]):
for i in T.grid(8):
with T.block("first_half"):
vi = T.axis.spatial(16, 8 + i)
B[vi] = A[vi - 8]
for i in T.grid(8):
with T.block("last_half"):
vi = T.axis.spatial(16, i)
B[vi] = A[vi + 8]
@T.prim_func
def scatter_compute_parallelize(
A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
# body
# with T.block("root")
for i in T.parallel(8):
with T.block("first_half"):
vi = T.axis.spatial(16, 8 + i)
T.reads(A[vi - 8])
T.writes(B[vi])
B[vi] = A[vi - 8]
for i in T.parallel(8):
with T.block("last_half"):
vi = T.axis.spatial(16, i)
T.reads(A[vi + 8])
T.writes(B[vi])
B[vi] = A[vi + 8]
# pylint: enable=no-member,invalid-name,unused-variable
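# A minimal sketch (not used by the tests below) of the pattern they all
# follow: build a schedule over a fixture PrimFunc, apply one loop-kind
# primitive, then verify both the transformed IR and the recorded trace.
def _check_loop_kind_primitive(fixture, expected, primitive_name, block_name="B", loop_index=0):
    s = tir.Schedule(fixture, debug_mask="all")
    loop = s.get_loops(s.get_block(block_name))[loop_index]
    # primitive_name is e.g. "parallel", "vectorize" or "unroll"
    getattr(s, primitive_name)(loop)
    tvm.ir.assert_structural_equal(s.mod["main"], expected)
    verify_trace_roundtrip(s, mod=fixture)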
def test_parallel():
s = tir.Schedule(element_wise, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
s.parallel(i)
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_parallelized)
verify_trace_roundtrip(s, mod=element_wise)
def test_parallel_predicate():
s = tir.Schedule(element_wise_split_predicate, debug_mask="all")
_, j, _ = s.get_loops(s.get_block("B"))
s.parallel(j)
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_split_predicate_parallelized)
verify_trace_roundtrip(s, mod=element_wise_split_predicate)
def test_parallel_reduction_block_iter():
s = tir.Schedule(matmul, debug_mask="all")
_, _, k = s.get_loops(s.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError):
s.parallel(k)
def test_parallel_not_quasi_affine():
s = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.parallel(i)
def test_parallel_not_compact_data_flow():
s = tir.Schedule(rowsum_not_compact_data_flow, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.parallel(i)
def test_vectorize():
s = tir.Schedule(element_wise_compute_at_split, debug_mask="all")
_, _, j1i = s.get_loops(s.get_block("C"))
s.vectorize(j1i)
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_compute_at_split_vectorized)
verify_trace_roundtrip(s, mod=element_wise_compute_at_split)
def test_vectorize_predicate():
s = tir.Schedule(element_wise_split_predicate, debug_mask="all")
i, _, _ = s.get_loops(s.get_block("B"))
s.vectorize(i)
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_split_predicate_vectorized)
verify_trace_roundtrip(s, mod=element_wise_split_predicate)
def test_vectorize_opaque_block():
s = tir.Schedule(opaque_block, debug_mask="all")
(i,) = s.get_loops(s.get_block("opaque"))
with pytest.raises(tvm.tir.ScheduleError):
s.vectorize(i)
def test_unroll():
s = tir.Schedule(rowsum, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
s.unroll(i)
tvm.ir.assert_structural_equal(s.mod["main"], rowsum_unrolled)
verify_trace_roundtrip(s, mod=rowsum)
def test_unroll_after_bind():
s = tir.Schedule(rowsum, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
s.bind(i, "blockIdx.x")
s.unroll(i)
tvm.ir.assert_structural_equal(s.mod["main"], rowsum_unrolled)
verify_trace_roundtrip(s, mod=rowsum)
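# unroll simply resets the loop kind, so the blockIdx.x binding placed on i
# just above is overwritten and the result matches plain rowsum_unrolled;
# test_bind_after_bind below exercises the same overwrite behavior for bind.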
def test_bind1():
s = tir.Schedule(element_wise, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
s.bind(i, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_i_bound)
verify_trace_roundtrip(s, mod=element_wise)
def test_bind2():
s = tir.Schedule(element_wise_compute_at_split, debug_mask="all")
_, j0 = s.get_loops(s.get_block("B"))
_, j1o, _ = s.get_loops(s.get_block("C"))
s.bind(j0, "threadIdx.x")
s.bind(j1o, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_compute_at_split_j0_j1o_bound)
verify_trace_roundtrip(s, mod=element_wise_compute_at_split)
def test_bind_cross_thread_reduction():
s = tir.Schedule(rowsum, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
s.bind(k, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], rowsum_cross_thread_reduction)
verify_trace_roundtrip(s, mod=rowsum)
def test_bind_not_cross_thread_reduction():
s = tir.Schedule(rowsum, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.bind(k, "blockIdx.x")
def test_bind_after_bind():
s = tir.Schedule(element_wise, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
s.bind(i, "blockIdx.x")
s.bind(i, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], element_wise_i_bound)
verify_trace_roundtrip(s, mod=element_wise)
def test_block_inside_init():
s = tir.Schedule(block_inside_init, debug_mask="all")
(i,) = s.get_loops(s.get_block("outer"))
s.bind(i, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_block_inside_init)
verify_trace_roundtrip(s, mod=block_inside_init)
def test_vectorize_after_decompose():
s = tir.Schedule(decomposed_gemm, debug_mask="all")
jj = s.get_loops(s.get_block("C"))[-1]
s.vectorize(jj)
tvm.ir.assert_structural_equal(s.mod["main"], decomposed_gemm_after_vectorize)
verify_trace_roundtrip(s, mod=decomposed_gemm)
def test_nested_block_bind():
s = tir.Schedule(nested_block_bind)
block_outer = s.get_block("outer")
block_inner = s.get_block("inner")
_, j = s.get_loops(block_outer)
_, l = s.get_loops(block_inner)
s.bind(l, "threadIdx.x")
s.bind(j, "blockIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_nested_block)
verify_trace_roundtrip(s, mod=nested_block_bind)
def test_nested_block_bind_after_cache_read():
s = tir.Schedule(nested_block_bind_after_cache_read)
block_outer = s.get_block("outer")
block_inner = s.get_block("inner")
(i,) = s.get_loops(block_outer)
(j,) = s.get_loops(block_inner)
s.bind(i, "blockIdx.x")
s.bind(j, "threadIdx.x")
tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_nested_block_after_cache_read)
verify_trace_roundtrip(s, mod=nested_block_bind_after_cache_read)
def test_vectorize_init():
s = tir.Schedule(decomposed_gemm, debug_mask="all")
init_blk = s.get_block("init")
upd_blk = s.get_block("update")
_, _, ii_0, jj_0 = s.get_loops(init_blk)
_, _, k_1, ii_1, jj_1 = s.get_loops(upd_blk)
s.vectorize(jj_0)
tvm.ir.assert_structural_equal(s.mod["main"], decomposed_gemm_parallelize_init)
verify_trace_roundtrip(s, mod=decomposed_gemm)
def test_scatter_parallelize():
s = tir.Schedule(scatter_compute, debug_mask="all")
first = s.get_block("first_half")
last = s.get_block("last_half")
(i_0,) = s.get_loops(first)
(i_1,) = s.get_loops(last)
s.parallel(i_0)
s.parallel(i_1)
tvm.ir.assert_structural_equal(s.mod["main"], scatter_compute_parallelize)
verify_trace_roundtrip(s, mod=scatter_compute)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_instruction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
# mypy: ignore-errors
import sys
import pytest
import tvm.testing
from tvm.tir.schedule import BlockRV, Instruction, InstructionKind, LoopRV
def test_inst_kind_get():
kind = InstructionKind.get("EnterPostproc")
assert not kind.is_pure
assert kind.name == "EnterPostproc"
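# InstructionKind is a global registry keyed by the instruction name. A small
# sketch of looking up another kind (assuming "GetBlock" is registered, as it
# is emitted whenever sch.get_block is traced):
def _example_inst_kind_lookup():
    kind = InstructionKind.get("GetBlock")
    assert kind.name == "GetBlock"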
def test_inst_construct_1():
block = BlockRV()
loop0 = LoopRV()
loop1 = LoopRV()
inst = Instruction(
kind=InstructionKind.get("GetLoops"),
inputs=[block],
attrs=[],
outputs=[loop0, loop1],
)
assert str(inst) == "_, _ = sch.get_loops(block=_)"
assert len(inst.inputs) == 1
assert len(inst.attrs) == 0
assert len(inst.outputs) == 2
assert inst.kind.same_as(InstructionKind.get("GetLoops"))
assert inst.inputs[0].same_as(block)
assert inst.outputs[0].same_as(loop0)
assert inst.outputs[1].same_as(loop1)
def test_inst_construct_2():
block = BlockRV()
inst = Instruction(
kind=InstructionKind.get("ComputeInline"),
inputs=[block],
attrs=[],
outputs=[],
)
assert str(inst) == "sch.compute_inline(block=_)"
assert len(inst.inputs) == 1
assert len(inst.attrs) == 0
assert len(inst.outputs) == 0
assert inst.kind.same_as(InstructionKind.get("ComputeInline"))
assert inst.inputs[0].same_as(block)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_pad_einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir, te
from tvm.script import tir as T
from tvm.tir.schedule.schedule import ScheduleError
from tvm.tir.schedule.testing import verify_trace_roundtrip
from tvm.meta_schedule.testing import te_workload
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def matmul_before(
A: T.Buffer[(128, 127), "float32"],
B: T.Buffer[(127, 127), "float32"],
C: T.Buffer[(128, 127), "float32"],
) -> None:
A_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
B_shared = T.alloc_buffer((127, 127), "float32", scope="shared")
C_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
for i0, i1 in T.grid(128, 127):
with T.block("A"):
i, j = T.axis.remap("SS", [i0, i1])
A_shared[i, j] = A[i, j]
for i0, i1 in T.grid(127, 127):
with T.block("B"):
i, j = T.axis.remap("SS", [i0, i1])
B_shared[i, j] = B[i, j]
for i0, i1, i2 in T.grid(128, 127, 127):
with T.block("C_shared"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
with T.init():
C_shared[i, j] = T.float32(0)
C_shared[i, j] = C_shared[i, j] + A_shared[i, k] * B_shared[k, j]
for i0, i1 in T.grid(128, 127):
with T.block("C"):
i, j = T.axis.remap("SS", [i0, i1])
C[i, j] = C_shared[i, j]
@T.prim_func
def matmul_expected(
A: T.Buffer[(128, 127), "float32"],
B: T.Buffer[(127, 127), "float32"],
C: T.Buffer[(128, 127), "float32"],
) -> None:
A_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
B_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i0, i1 in T.grid(128, 128):
with T.block("A"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(A[i, j])
T.writes(A_shared_padded[i, j])
A_shared_padded[i, j] = T.if_then_else(j < 127, A[i, j], T.float32(0), dtype="float32")
for i0, i1 in T.grid(128, 128):
with T.block("B"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(B[i, j])
T.writes(B_shared_padded[i, j])
B_shared_padded[i, j] = T.if_then_else(
i < 127 and j < 127, B[i, j], T.float32(0), dtype="float32"
)
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("C_shared"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(A_shared_padded[i, k], B_shared_padded[k, j])
T.writes(C_shared_padded[i, j])
with T.init():
C_shared_padded[i, j] = T.float32(0)
C_shared_padded[i, j] = (
C_shared_padded[i, j] + A_shared_padded[i, k] * B_shared_padded[k, j]
)
for i0, i1 in T.grid(128, 127):
with T.block("C"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads(C_shared_padded[i, j])
T.writes(C[i, j])
C[i, j] = C_shared_padded[i, j]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def test_pad_matmul():
sch = tir.Schedule(matmul_before, debug_mask="all")
C = sch.get_block("C_shared")
sch.pad_einsum(C, [0, 1, 1])
tvm.ir.assert_structural_equal(matmul_expected, sch.mod["main"])
verify_trace_roundtrip(sch, mod=matmul_before)
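# Note on the padding argument: pad_einsum takes one padding amount per block
# iter of "C_shared" (i, j, k). [0, 1, 1] grows the j and k extents from 127 to
# 128, so every buffer in the einsum becomes 128x128 and the producer blocks
# gain if_then_else guards that zero-fill the padded tail, as encoded in
# matmul_expected.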
def test_pad_matmul_error_non_intermediate_buffer():
func = te.create_prim_func(te_workload.matmul(128, 127, 127))
sch = tir.Schedule(func, debug_mask="all")
C = sch.get_block("C")
with pytest.raises(ScheduleError):
sch.pad_einsum(C, [0, 1, 1])
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def rowsum_blockized(a: T.handle, b: T.handle) -> None:
B = T.match_buffer(b, [32, 4])
A = T.match_buffer(a, [32, 4, 128])
for i0, i2_0 in T.grid(32, 16):
with T.block("blockized_B"):
io, ko = T.axis.remap("SR", [i0, i2_0])
with T.init():
for i1 in T.serial(0, 4):
with T.block("B_init"):
ii_init = T.axis.S(4, i1)
B[io, ii_init] = 0.0
for i1_1, i2_1 in T.grid(4, 8):
with T.block("B"):
ii = T.axis.S(4, i1_1)
k = T.axis.R(128, ko * 8 + i2_1)
B[io, ii] = B[io, ii] + A[io, ii, k]
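# The T.init() above contains a whole loop nest (block "B_init");
# decompose_reduction at the outer loop io hoists exactly that nest into the
# standalone "blockized_B_init" block of matmul_decompose1 (see
# test_reduction_decompose1 below).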
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_decompose0(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = 0.0
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_decompose1(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [32, 4, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [32, 4], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 32):
with T.block("blockized_B_init"):
io = T.axis.S(32, i0)
for i1 in T.serial(0, 4):
with T.block("B_init"):
ii = T.axis.S(4, i1)
B[io, ii] = T.float32(0)
for i0, i2_o in T.grid(32, 16):
with T.block("blockized_B_update"):
io, ko = T.axis.remap("SR", [i0, i2_o])
for i1, i2_i in T.grid(4, 8):
with T.block("B"):
ii = T.axis.S(4, i1)
k = T.axis.R(128, ko * 8 + i2_i)
B[io, ii] = B[io, ii] + A[io, ii, k]
@T.prim_func
def matmul_decompose2(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
for i0, i1 in T.grid(128, 128):
with T.block("update_init"):
vi_init, vj_init = T.axis.remap("SS", [i0, i1])
C[vi_init, vj_init] = T.float32(0)
for i2 in T.serial(0, 128):
with T.block("update_update"):
vi, vj, vk = T.axis.remap("SSR", [i0, i1, i2])
C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
@T.prim_func
def matmul_decompose_fail3(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, k, j in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_decompose4(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
# body
with T.block("root"):
T.reads([])
T.writes([])
for i0_0 in T.serial(0, 16):
for i0_1_init, i1_init in T.grid(8, 128):
with T.block("update_init"):
vi_init = T.axis.S(128, i0_0 * 8 + i0_1_init)
vj_init = T.axis.S(128, i1_init)
C[vi_init, vj_init] = T.float32(0)
for i0_1, i1, i2_0, i2_1 in T.grid(8, 128, 19, 7):
with T.block("update_update"):
T.where((((i2_0 * 7) + i2_1) < 128))
vi = T.axis.S(128, i0_0 * 8 + i0_1)
vj = T.axis.S(128, i1)
vk = T.axis.R(128, i2_0 * 7 + i2_1)
C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
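# The T.where guard above survives decomposition: k was split by factors
# [19, 7] (133 > 128), so the update block keeps the predicate
# i2_0 * 7 + i2_1 < 128, while the init nest, which does not touch k, needs
# no guard (see test_reduction_decompose4 below).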
@T.prim_func
def matmul_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
T.block_attr({"test_annotation": 1})
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_decompose_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
T.block_attr({"test_annotation": 1})
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = 0.0
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
T.block_attr({"test_annotation": 1})
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def colsum_with_vectorization(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 32], dtype="float32")
B = T.match_buffer(b, [32], dtype="float32")
for k in T.serial(0, 128):
for i in T.vectorized(0, 32):
with T.block("B"):
vk, vi = T.axis.remap("RS", [k, i])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vk, vi]
@T.prim_func
def colsum_decompose_with_vectorization(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 32], dtype="float32")
B = T.match_buffer(b, [32], dtype="float32")
for i in T.vectorized(0, 32):
with T.block("B_init"):
vi = T.axis.S(32, i)
B[vi] = T.float32(0)
for k in T.serial(0, 128):
for i in T.vectorized(0, 32):
with T.block("B"):
vk, vi = T.axis.remap("RS", [k, i])
B[vi] = B[vi] + A[vk, vi]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_reduction_decompose0(use_block_name):
s = tir.Schedule(matmul, debug_mask="all")
C = "update" if use_block_name else s.get_block("update")
i, j, k = s.get_loops(C)
s.decompose_reduction(C, i)
tvm.ir.assert_structural_equal(matmul_decompose0, s.mod["main"])
verify_trace_roundtrip(s, mod=matmul)
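# decompose_reduction(block, loop) hoists the block's T.init() into a separate
# init block placed above `loop` and returns that new block. A sketch of using
# the return value (assuming the "<name>_init"/"<name>_update" renaming seen in
# test_reduction_decompose_with_different_for_kind below):
def _example_decompose_returns_init_block():
    s = tir.Schedule(matmul, debug_mask="all")
    update = s.get_block("update")
    i, _, _ = s.get_loops(update)
    init = s.decompose_reduction(update, i)
    assert s.get(init).same_as(s.get(s.get_block("update_init")))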
def test_reduction_decompose1(use_block_name):
s = tir.Schedule(rowsum_blockized, debug_mask="all")
blockized_B = "blockized_B" if use_block_name else s.get_block("blockized_B")
io, ko = s.get_loops(blockized_B)
s.decompose_reduction(blockized_B, io)
tvm.ir.assert_structural_equal(matmul_decompose1, s.mod["main"])
verify_trace_roundtrip(s, mod=rowsum_blockized)
def test_reduction_decompose2():
s = tir.Schedule(matmul, debug_mask="all")
C = s.get_block("update")
i, j, k = s.get_loops(C)
s.decompose_reduction(C, k)
tvm.ir.assert_structural_equal(matmul_decompose2, s.mod["main"])
verify_trace_roundtrip(s, mod=matmul)
def test_reduction_decompose3():
s = tir.Schedule(matmul_decompose_fail3, debug_mask="all")
C = s.get_block("update")
i, j, k = s.get_loops(C)
with pytest.raises(tvm.tir.ScheduleError):
s.decompose_reduction(C, k)
def test_reduction_decompose4():
s = tir.Schedule(matmul, debug_mask="all")
C = s.get_block("update")
i, j, k = s.get_loops(C)
io, ii = s.split(i, factors=[16, 8])
ko, ki = s.split(k, factors=[19, 7])
s.decompose_reduction(C, ii)
tvm.ir.assert_structural_equal(matmul_decompose4, s.mod["main"])
verify_trace_roundtrip(s, mod=matmul)
def test_reduction_decompose_with_annotation():
s = tir.Schedule(matmul_with_annotation, debug_mask="all")
C = s.get_block("update")
i, j, k = s.get_loops(C)
s.decompose_reduction(C, i)
tvm.ir.assert_structural_equal(matmul_decompose_with_annotation, s.mod["main"])
verify_trace_roundtrip(s, mod=matmul_with_annotation)
def test_reduction_decompose_with_different_for_kind():
s = tir.Schedule(colsum_with_vectorization, debug_mask="all")
B = s.get_block("B")
k, _ = s.get_loops(B)
B_init = s.decompose_reduction(B, k)
tvm.ir.assert_structural_equal(s.mod["main"], colsum_decompose_with_vectorization)
assert s.get(B).same_as(s.get(s.get_block("B_update")))
assert s.get(B_init).same_as(s.get(s.get_block("B_init")))
verify_trace_roundtrip(s, mod=colsum_with_vectorization)
def test_decompose_reduction_ref_hash_check():
mod = tvm.IRModule.from_expr(matmul)
mod_bak = mod
hash_before = tvm.ir.structural_hash(mod_bak)
s = tir.Schedule(mod["main"], debug_mask="all")
C = s.get_block("update")
i, j, k = s.get_loops(C)
s.decompose_reduction(C, k)
hash_after = tvm.ir.structural_hash(mod_bak)
assert hash_before == hash_after
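# tir.Schedule copies the given PrimFunc into a fresh IRModule before
# scheduling, so mutating the schedule leaves the original module untouched;
# the identical structural hashes before and after confirm it.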
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_reindex.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.schedule import ScheduleError
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def transpose_elementwise(
A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vj, vi] * 2.0
@T.prim_func
def transpose_elementwise_reindex_read(
A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]
) -> None:
A_reindex = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("A_reindex"):
vi, vj = T.axis.remap("SS", [i, j])
A_reindex[vi, vj] = A[vj, vi]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A_reindex[vi, vj] * 2.0
@T.prim_func
def conv2d_nhwc(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
T.float32(0),
dtype="float32",
)
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
with T.block("conv2d_nhwc"):
n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
Conv2d_nhwc[n, h, w, co] = T.float32(0)
Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
* Weight[rh, rw, rc, co]
)
@T.prim_func
def conv2d_nhwc_reindex_data(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
ReindexInput = T.alloc_buffer([1, 112, 112, 7, 7, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
T.float32(0),
dtype="float32",
)
for i0, i1, i2, i3, i4, i5 in T.grid(1, 112, 112, 7, 7, 3):
with T.block("ReindexInput"):
n, h, w, rh, rw, rc = T.axis.remap("SSSSSS", [i0, i1, i2, i3, i4, i5])
ReindexInput[n, h, w, rh, rw, rc] = PadInput[n, ((h * 2) + rh), ((w * 2) + rw), rc]
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
with T.block("conv2d_nhwc"):
n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
Conv2d_nhwc[n, h, w, co] = T.float32(0)
Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
ReindexInput[n, h, w, rh, rw, rc] * Weight[rh, rw, rc, co]
)
@T.prim_func
def conv2d_nhwc_reindex_weight(
var_inputs: T.handle, var_weight: T.handle, var_conv2d_nhwc: T.handle
) -> None:
inputs = T.match_buffer(var_inputs, [1, 224, 224, 3], dtype="float32")
weight = T.match_buffer(var_weight, [7, 7, 3, 64], dtype="float32")
conv2d_nhwc = T.match_buffer(var_conv2d_nhwc, [1, 112, 112, 64], dtype="float32")
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
weight_reindex = T.alloc_buffer([64, 7, 7, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
T.float32(0),
dtype="float32",
)
for ax3, ax4, ax5, ax6 in T.grid(64, 7, 7, 3):
with T.block("weight_reindex"):
v3, v4, v5, v6 = T.axis.remap("SSSS", [ax3, ax4, ax5, ax6])
T.reads(weight[v4, v5, v6, v3])
T.writes(weight_reindex[v3, v4, v5, v6])
weight_reindex[v3, v4, v5, v6] = weight[v4, v5, v6, v3]
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
with T.block("conv2d_nhwc"):
n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
T.reads(
PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
weight_reindex[co, rh, rw, rc],
)
T.writes(conv2d_nhwc[n, h, w, co])
with T.init():
conv2d_nhwc[n, h, w, co] = T.float32(0)
conv2d_nhwc[n, h, w, co] = (
conv2d_nhwc[n, h, w, co]
+ PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc]
* weight_reindex[co, rh, rw, rc]
)
@T.prim_func
def matmul(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
for i0, i1, i2 in T.grid(512, 512, 512):
with T.block("matmul"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(C[i, j], A[i, k], B[k, j])
T.writes(C[i, j])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
@T.prim_func
def matmul_reindex_write(
A: T.Buffer[(512, 512), "float32"],
B: T.Buffer[(512, 512), "float32"],
C: T.Buffer[(512, 512), "float32"],
) -> None:
C_reindex = T.alloc_buffer([512, 512], dtype="float32")
for i0, i1, i2 in T.grid(512, 512, 512):
with T.block("matmul"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(C_reindex[i, j], A[i, k], B[k, j])
T.writes(C_reindex[i, j])
with T.init():
C_reindex[i, j] = T.float32(0)
C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
for i0, i1 in T.grid(512, 512):
with T.block("C_reindex"):
v0, v1 = T.axis.remap("SS", [i0, i1])
T.reads(C_reindex[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_reindex[v0, v1]
@T.prim_func
def multiple_read(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vj, vi] + A[vi, vj]
@T.prim_func
def mixed_dtype(
p0: T.Buffer[(T.int64(2), 1280), "float16"],
p1: T.Buffer[(1280, 1280), "float16"],
T_matmul_NT: T.Buffer[(T.int64(2), 1280), "float16"],
) -> None:
for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
with T.block("T_matmul_NT"):
i = T.axis.spatial(T.int64(2), i0)
j, k = T.axis.remap("SR", [i1, i2])
T.reads(p0[i, k], p1[j, k])
T.writes(T_matmul_NT[i, j])
with T.init():
T_matmul_NT[i, j] = T.float16(0)
T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1[j, k]
@T.prim_func
def mixed_dtype_reindex_write(
p0: T.Buffer[(T.int64(2), 1280), "float16"],
p1: T.Buffer[(1280, 1280), "float16"],
T_matmul_NT: T.Buffer[(T.int64(2), 1280), "float16"],
) -> None:
T_matmul_NT_reindex = T.alloc_buffer([T.int64(2), 1280], dtype="float16")
for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
with T.block("T_matmul_NT"):
i = T.axis.spatial(T.int64(2), i0)
j, k = T.axis.remap("SR", [i1, i2])
T.reads(p0[i, k], p1[j, k])
T.writes(T_matmul_NT_reindex[i, j])
with T.init():
T_matmul_NT_reindex[i, j] = T.float16(0)
T_matmul_NT_reindex[i, j] = T_matmul_NT_reindex[i, j] + p0[i, k] * p1[j, k]
for ax0, ax1 in T.grid(T.int64(2), 1280):
with T.block("T_matmul_NT_reindex"):
v0 = T.axis.spatial(T.int64(2), ax0)
v1 = T.axis.remap("S", [ax1])
T.reads(T_matmul_NT_reindex[v0, v1])
T.writes(T_matmul_NT[v0, v1])
T_matmul_NT[v0, v1] = T_matmul_NT_reindex[v0, v1]
@T.prim_func
def matmul_unit_dim(
A: T.Buffer[(1, 512), "float32"],
B: T.Buffer[(512, 1), "float32"],
C: T.Buffer[(1, 1), "float32"],
) -> None:
for i0, i1, i2 in T.grid(1, 1, 512):
with T.block("matmul"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(C[i, j], A[i, k], B[k, j])
T.writes(C[i, j])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
@T.prim_func
def matmul_unit_dim_reindex_write(
A: T.Buffer[(1, 512), "float32"],
B: T.Buffer[(512, 1), "float32"],
C: T.Buffer[(1, 1), "float32"],
) -> None:
C_reindex = T.alloc_buffer([1, 1], dtype="float32")
for i0, i1, i2 in T.grid(1, 1, 512):
with T.block("matmul"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(C_reindex[i, j], A[i, k], B[k, j])
T.writes(C_reindex[i, j])
with T.init():
C_reindex[i, j] = T.float32(0)
C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
for i0, i1 in T.grid(1, 1):
with T.block("C_reindex"):
v0, v1 = T.axis.remap("SS", [i0, i1])
T.reads(C_reindex[v0, v1])
T.writes(C[v0, v1])
C[v0, v1] = C_reindex[v0, v1]
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
use_buffer_name = tvm.testing.parameter(by_dict={"buffer_index": False, "buffer_name": True})
def test_reindex_read_basic(use_block_name, use_buffer_name):
sch = tir.Schedule(transpose_elementwise)
block = "B" if use_block_name else sch.get_block("B")
buf = "A" if use_buffer_name else ("read", 0)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(transpose_elementwise_reindex_read, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=transpose_elementwise)
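# reindex addresses the target buffer either by name or by its position in the
# block's read/write regions; a sketch showing that both spellings produce the
# same module for the transpose_elementwise fixture:
def _example_reindex_addressing():
    by_name = tir.Schedule(transpose_elementwise)
    by_name.reindex("B", "A")
    by_index = tir.Schedule(transpose_elementwise)
    by_index.reindex(by_index.get_block("B"), ("read", 0))
    tvm.ir.assert_structural_equal(by_name.mod["main"], by_index.mod["main"])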
def test_conv2d_reindex_weight(use_block_name, use_buffer_name):
sch = tir.Schedule(conv2d_nhwc)
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
buf = "Weight" if use_buffer_name else ("read", 1)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_weight, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_conv2d_reindex_data(use_block_name, use_buffer_name):
sch = tir.Schedule(conv2d_nhwc)
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
buf = "PadInput" if use_buffer_name else ("read", 0)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_data, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_matmul_reindex_write(use_block_name, use_buffer_name):
sch = tir.Schedule(matmul)
block = "matmul" if use_block_name else sch.get_block("matmul")
buf = "C" if use_buffer_name else ("write", 0)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(matmul_reindex_write, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=matmul)
def test_reindex_fail_multiple_read(use_block_name, use_buffer_name):
sch = tir.Schedule(multiple_read)
block = "B" if use_block_name else sch.get_block("B")
buf = "A" if use_buffer_name else ("read", 0)
with pytest.raises(ScheduleError):
sch.reindex(block, buf)
def test_reindex_mixed_dtype(use_block_name, use_buffer_name):
sch = tir.Schedule(mixed_dtype)
block = "T_matmul_NT" if use_block_name else sch.get_block("T_matmul_NT")
buf = "T_matmul_NT" if use_buffer_name else ("write", 0)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(mixed_dtype_reindex_write, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=mixed_dtype)
def test_matmul_unit_dim_reindex_write(use_block_name, use_buffer_name):
sch = tir.Schedule(matmul_unit_dim)
block = "matmul" if use_block_name else sch.get_block("matmul")
buf = "C" if use_buffer_name else ("write", 0)
sch.reindex(block, buf)
tvm.ir.assert_structural_equal(matmul_unit_dim_reindex_write, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=matmul_unit_dim)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_reorder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i, j, k, l in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i, j, k, l in T.grid(128, 128, 128, 8):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
vl = T.axis.S(128, l * 16)
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_dependent_loop(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i in T.serial(0, 128):
for j, k, l in T.grid(128, i, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i, j, k, l in T.grid(128, 128, 128, 128):
with T.block("B"):
T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
C = T.alloc_buffer((128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.serial(0, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = A[vi, vj, vk] * 2.0
for k in T.serial(0, 128):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = C[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_loops_not_same_scope(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
for k in T.serial(0, 128):
with T.block("B"):
vk = T.axis.S(128, k)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_wrong_block_var_type(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
vk = T.axis.scan(128, k)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_reordered(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for l, j, k, i in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_reordered2(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for k, j, i, l in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_reordered_with_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for l, j, k, i in T.grid(128, 128, 128, 128):
with T.block("B"):
T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16], "float32")
B = T.match_buffer(b, [16, 16], "float32")
for i, j in T.grid(16, 16):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([A[0:16, 0:16]])
A[vi, vj] = 1
for i, j in T.grid(16, 16):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([B[0:16, 0:16]])
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
@T.prim_func
def opaque_access_reorder(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16], "float32")
B = T.match_buffer(b, [16, 16], "float32")
for j, i in T.grid(16, 16):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([A[0:16, 0:16]])
A[vi, vj] = 1
for j, i in T.grid(16, 16):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([B[0:16, 0:16]])
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
# pylint: enable=no-member,invalid-name,unused-variable
def test_reorder():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
sch.reorder(l, i)
tvm.ir.assert_structural_equal(elementwise_reordered, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder2():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
sch.reorder(k, i, l)
tvm.ir.assert_structural_equal(elementwise_reordered2, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder_with_opaque_access():
sch = tir.Schedule(opaque_access, debug_mask="all")
block_a = sch.get_block("A")
i, j = sch.get_loops(block_a)
sch.reorder(j, i)
block_b = sch.get_block("B")
i, j = sch.get_loops(block_b)
sch.reorder(j, i)
tvm.ir.assert_structural_equal(opaque_access_reorder, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_reorder_overlapped_access():
@T.prim_func
def overlapped_access(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
# example where the first axis is written multiple times
for v0, v1, v2 in T.grid(6, 4, 4):
with T.block("block"):
i = T.axis.spatial(14, v0 * 2 + v1)
j = T.axis.spatial(4, v2)
B[i, j] = A[i, j] + 1.0
@T.prim_func
def overlapped_access_reorder(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
# example where the first axis is written multiple times
for v0, v2, v1 in T.grid(6, 4, 4):
with T.block("block"):
i = T.axis.spatial(14, v0 * 2 + v1)
j = T.axis.spatial(4, v2)
B[i, j] = A[i, j] + 1.0
sch = tir.Schedule(overlapped_access, debug_mask="all")
v0, v1, v2 = sch.get_loops(sch.get_block("block"))
sch.reorder(v0, v2, v1)
tvm.ir.assert_structural_equal(overlapped_access_reorder, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=overlapped_access)
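# The binding v0 * 2 + v1 above is quasi-affine, merely non-injective, so
# reorder accepts it; contrast with the next test, where the v0 * v0 + v1
# binding stops v0 from being reordered while swapping v1 and v2 still works.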
def test_reorder_with_partial_affineness():
@T.prim_func
def non_affine_func(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
for v0, v1, v2 in T.grid(6, 4, 4):
with T.block("block"):
i = T.axis.spatial(14, v0 * v0 + v1)
j = T.axis.spatial(4, v2)
B[i, j] = A[i, j] + 1.0
@T.prim_func
def non_affine_func_reorder(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
for v0, v2, v1 in T.grid(6, 4, 4):
with T.block("block"):
i = T.axis.spatial(14, v0 * v0 + v1)
j = T.axis.spatial(4, v2)
B[i, j] = A[i, j] + 1.0
sch = tir.Schedule(non_affine_func, debug_mask="all")
v0, v1, v2 = sch.get_loops(sch.get_block("block"))
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(v0, v2, v1)
sch.reorder(v2, v1)
tvm.ir.assert_structural_equal(non_affine_func_reorder, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=non_affine_func)
def test_reorder_with_cascade_tiled_ops():
@T.prim_func
def cascade_pool_ops(
x: T.Buffer[(1, 16, 112, 112), "float32"], y2: T.Buffer[(1, 16, 108, 108), "float32"]
) -> None:
y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
for n, c, h, w, kh, kw in T.grid(1, 16, 110, 110, 3, 3):
with T.block("pool_0"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
with T.init():
y1[ax0, ax1, ax2, ax3] = 0.0
y1[ax0, ax1, ax2, ax3] = y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
for n, c, h, w, kh, kw in T.grid(1, 16, 108, 108, 3, 3):
with T.block("pool_1"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
with T.init():
y2[ax0, ax1, ax2, ax3] = 0.0
y2[ax0, ax1, ax2, ax3] = y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]
@T.prim_func
def cascade_pool_ops_tile_reordered(
x: T.Buffer[(1, 16, 112, 112), "float32"], y2: T.Buffer[(1, 16, 108, 108), "float32"]
) -> None:
y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
for n, c, h_o in T.grid(1, 16, 27):
for w, h_i, kh, kw in T.grid(110, 6, 3, 3):
with T.block("pool_0"):
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(16, c)
ax2 = T.axis.spatial(110, h_o * 4 + h_i)
ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
with T.init():
y1[ax0, ax1, ax2, ax3] = 0.0
y1[ax0, ax1, ax2, ax3] = (
y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
)
for h_i, w, kh, kw in T.grid(4, 108, 3, 3):
with T.block("pool_1"):
ax0 = T.axis.spatial(1, n)
ax1 = T.axis.spatial(16, c)
ax2 = T.axis.spatial(108, h_o * 4 + h_i)
ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
with T.init():
y2[ax0, ax1, ax2, ax3] = 0.0
y2[ax0, ax1, ax2, ax3] = (
y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]
)
sch = tvm.tir.schedule.Schedule(cascade_pool_ops)
pool_0 = sch.get_block("pool_0")
pool_1 = sch.get_block("pool_1")
_, _, h, w, _, _ = sch.get_loops(pool_1)
ho, _ = sch.split(h, factors=[None, 4])
sch.compute_at(pool_0, ho)
_, _, _, h_i, w, _, _ = sch.get_loops(pool_0)
sch.reorder(w, h_i)
tvm.ir.assert_structural_equal(cascade_pool_ops_tile_reordered, sch.mod["main"], True)
verify_trace_roundtrip(sch=sch, mod=cascade_pool_ops)
def test_reorder_with_predicate():
sch = tir.Schedule(elementwise_predicate, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(l, i)
def test_reorder_fail_with_multi_appearance_loops():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(k, i, i)
def test_reorder_fail_with_non_single_branch_loop():
sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(k, i)
sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
block_b = sch.get_block("B")
block_c = sch.get_block("C")
i, j, k1 = sch.get_loops(block_b)
_, _, k2 = sch.get_loops(block_c)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(k1, i, k2)
def test_reorder_fail_with_loops_not_under_same_scope():
sch = tir.Schedule(elementwise_with_loops_not_same_scope, debug_mask="all")
block_b = sch.get_block("B")
block_a = sch.get_block("A")
i, j = sch.get_loops(block_a)
k = sch.get_loops(block_b)[0]
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(k, i)
def test_reorder_fail_with_wrong_block_var_type():
sch = tir.Schedule(elementwise_with_wrong_block_var_type, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(k, i)
def test_reorder_fail_with_dependent_loops():
sch = tir.Schedule(elementwise_dependent_loop, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(l, i)
def test_reorder_fail_not_affine_bindings():
sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.reorder(l, i)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_rfactor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir, topi
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def transformed_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
C = T.match_buffer(c, [128, 128], dtype="float32")
for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
with T.block("update"):
vi, vj = T.axis.remap("SS", [i0, i1])
vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
T.reads([A[vi, vk], B[vj, vk]])
T.writes([C[vi, vj]])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
@T.prim_func
def transformed_matmul_with_let(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
C = T.match_buffer(c, [128, 128], dtype="float32")
for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
with T.block("update"):
vi, vj = T.axis.remap("SS", [i0, i1])
vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
T.reads([A[vi, vk], B[vj, vk]])
T.writes([C[vi, vj]])
with T.init():
C[vi, vj] = 0.0
v_C: T.float32 = C[vi, vj] + (A[vi, vk] * B[vj, vk])
C[vi, vj] = v_C
@T.prim_func
def matmul_rfactor(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
C = T.match_buffer(c, [128, 128], dtype="float32")
C_rf = T.alloc_buffer([4, 128, 128], dtype="float32")
for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
with T.block("update_rf"):
vi2_inner_inner = T.axis.S(4, i2_inner_inner)
vi = T.axis.S(128, i0)
vj = T.axis.S(128, i1)
vi2_outer = T.axis.R(4, i2_outer)
vi2_inner_outer = T.axis.R(8, i2_inner_outer)
with T.init():
C_rf[vi2_inner_inner, vi, vj] = 0.0
C_rf[vi2_inner_inner, vi, vj] = C_rf[vi2_inner_inner, vi, vj] + (
A[vi, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
* B[vj, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
)
for i0_1, i1_1, i2_inner_inner_1 in T.grid(128, 128, 4):
with T.block("update"):
vi2_inner_inner_1, vi_1, vj_1 = T.axis.remap("RSS", [i2_inner_inner_1, i0_1, i1_1])
with T.init():
C[vi_1, vj_1] = 0.0
C[vi_1, vj_1] = C[vi_1, vj_1] + C_rf[vi2_inner_inner_1, vi_1, vj_1]
@T.prim_func
def matmul_not_stage_pipeline(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [256, 256])
B = T.match_buffer(b, [256, 256])
D = T.match_buffer(d, [256, 256])
C = T.alloc_buffer([256, 256])
for i, j, k in T.grid(128, 128, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(256, 256):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = C[vi, vj]
@T.prim_func
def matmul_not_same_buffer_access(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vj, vi] = C[vj, vi] + A[vi, vk] * B[vk, vj]
@T.prim_func
def matmul_loop_multiple_children(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
D = T.match_buffer(d, [128, 128])
for k, i, j in T.grid(128, 128, 128):
with T.block("C"):
ck, ci, cj = T.axis.remap("RSS", [k, i, j])
with T.init():
C[ci, cj] = 0.0
C[ci, cj] = C[ci, cj] + A[ci, ck] * B[ck, cj]
with T.block("D"):
dk, di, dj = T.axis.remap("RSS", [k, i, j])
with T.init():
D[di, dj] = 0.0
D[di, dj] = D[di, dj] + B[di, dk] * A[dk, dj]
@T.prim_func
def square_sum(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
C = T.match_buffer(c, [16])
for b0, i0, j0 in T.grid(16, 256, 256):
with T.block("C"):
b, i, j = T.axis.remap("SRR", [b0, i0, j0])
with T.init():
C[b] = 0.0
C[b] = C[b] + A[b, i, j] * A[b, i, j]
@T.prim_func
def square_sum_rfactor(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
C = T.match_buffer(c, [16])
C_rf = T.alloc_buffer([16, 256])
for i0, i1, i2 in T.grid(16, 256, 256):
with T.block("C_rf"):
vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
with T.init():
C_rf[b, vi2] = 0.0
C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
for i0_1, i2_1 in T.grid(16, 256):
with T.block("C"):
vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
with T.init():
C[b_1] = 0.0
C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
@T.prim_func
def transformed_square_sum_square_root(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
D = T.match_buffer(d, [16])
C = T.alloc_buffer([16])
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
with T.block("C"):
b = T.axis.S(16, i0)
i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
T.reads([A[b, i, j]])
T.writes([C[b]])
with T.init():
C[b] = 0.0
C[b] = C[b] + (A[b, i, j] * A[b, i, j])
for i0_1 in T.serial(0, 16):
with T.block("D"):
b_1 = T.axis.S(16, i0_1)
T.reads([C[b_1]])
T.writes([D[b_1]])
D[b_1] = T.sqrt(C[b_1], dtype="float32")
@T.prim_func
def square_sum_square_root_rfactor(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
D = T.match_buffer(d, [16])
C = T.alloc_buffer([16])
C_rf = T.alloc_buffer([1, 16])
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
with T.block("C_rf"):
vi1_i2_fused_inner, b = T.axis.remap("SS", [i1_i2_fused_inner, i0])
i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
with T.init():
C_rf[vi1_i2_fused_inner, b] = 0.0
C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + (A[b, i, j] * A[b, i, j])
for i0_1, i1_i2_fused_inner_1 in T.grid(16, 1):
with T.block("C"):
vi1_i2_fused_inner_1, b_1 = T.axis.remap("RS", [i1_i2_fused_inner_1, i0_1])
with T.init():
C[b_1] = 0.0
C[b_1] = C[b_1] + C_rf[vi1_i2_fused_inner_1, b_1]
for i0_2 in T.serial(0, 16):
with T.block("D"):
b_2 = T.axis.S(16, i0_2)
D[b_2] = T.sqrt(C[b_2], dtype="float32")
@T.prim_func
def transformed_square_sum_square_root_factor_one_1(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
D = T.match_buffer(d, [16])
C = T.alloc_buffer([16])
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
with T.block("C"):
b = T.axis.S(16, i0)
i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
with T.init():
C[b] = 0.0
C[b] = C[b] + (A[b, i, j] * A[b, i, j])
for i0_1 in T.serial(0, 16):
with T.block("D"):
b_1 = T.axis.S(16, i0_1)
D[b_1] = T.sqrt(C[b_1], dtype="float32")
@T.prim_func
def square_sum_square_root_factor_one_1_rfactor(
A: T.Buffer[(16, 256, 256), "float32"], D: T.Buffer[(16,), "float32"]
) -> None:
C = T.alloc_buffer([16], dtype="float32")
C_rf = T.alloc_buffer([1, 16], dtype="float32")
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
with T.block("C_rf"):
b = T.axis.spatial(16, i0)
i = T.axis.reduce(256, i1_i2_fused_outer // 256)
j = T.axis.reduce(256, i1_i2_fused_outer % 256)
vi1_i2_fused_inner = T.axis.spatial(1, i1_i2_fused_inner)
with T.init():
C_rf[vi1_i2_fused_inner, b] = T.float32(0)
C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + A[b, i, j] * A[b, i, j]
for i0, i1_i2_fused_inner in T.grid(16, 1):
with T.block("C"):
b, vi1_i2_fused_inner = T.axis.remap("SR", [i0, i1_i2_fused_inner])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + C_rf[vi1_i2_fused_inner, b]
for i0_1 in T.serial(16):
with T.block("D"):
b_1 = T.axis.spatial(16, i0_1)
D[b_1] = T.sqrt(C[b_1], dtype="float32")
@T.prim_func
def transformed_square_sum_square_root_factor_one_2(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
D = T.match_buffer(d, [16])
C = T.alloc_buffer([16])
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
with T.block("C"):
b = T.axis.S(16, i0)
i = T.axis.R(256, T.floordiv(i1_i2_fused_inner, 256))
j = T.axis.R(256, T.floormod(i1_i2_fused_inner, 256))
with T.init():
C[b] = 0.0
C[b] = C[b] + (A[b, i, j] * A[b, i, j])
for i0_1 in T.serial(0, 16):
with T.block("D"):
b_1 = T.axis.S(16, i0_1)
D[b_1] = T.sqrt(C[b_1], dtype="float32")
@T.prim_func
def square_sum_square_root_factor_one_2_rfactor(
A: T.Buffer[(16, 256, 256), "float32"], D: T.Buffer[(16,), "float32"]
) -> None:
C = T.alloc_buffer([16], dtype="float32")
C_rf = T.alloc_buffer([16, 1], dtype="float32")
for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
with T.block("C_rf"):
b = T.axis.spatial(16, i0)
i = T.axis.reduce(256, i1_i2_fused_inner // 256)
j = T.axis.reduce(256, i1_i2_fused_inner % 256)
vi1_i2_fused_outer = T.axis.spatial(1, i1_i2_fused_outer)
with T.init():
C_rf[b, vi1_i2_fused_outer] = T.float32(0)
C_rf[b, vi1_i2_fused_outer] = C_rf[b, vi1_i2_fused_outer] + A[b, i, j] * A[b, i, j]
for i0, i1_i2_fused_outer in T.grid(16, 1):
with T.block("C"):
b, vi1_i2_fused_outer = T.axis.remap("SR", [i0, i1_i2_fused_outer])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + C_rf[b, vi1_i2_fused_outer]
for i0_1 in T.serial(16):
with T.block("D"):
b_1 = T.axis.spatial(16, i0_1)
D[b_1] = T.sqrt(C[b_1], dtype="float32")
@T.prim_func
def square_sum_with_annotation(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
C = T.match_buffer(c, [16])
for b0, i0, j0 in T.grid(16, 256, 256):
with T.block("C"):
T.block_attr({"test_annotation": 1})
b, i, j = T.axis.remap("SRR", [b0, i0, j0])
with T.init():
C[b] = 0.0
C[b] = C[b] + A[b, i, j] * A[b, i, j]
@T.prim_func
def square_sum_with_annotation_rfactor(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16, 256, 256])
C = T.match_buffer(c, [16])
C_rf = T.alloc_buffer([16, 256])
for i0, i1, i2 in T.grid(16, 256, 256):
with T.block("C_rf"):
T.block_attr({"test_annotation": 1})
vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
with T.init():
C_rf[b, vi2] = 0.0
C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
for i0_1, i2_1 in T.grid(16, 256):
with T.block("C"):
T.block_attr({"test_annotation": 1})
vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
with T.init():
C[b_1] = 0.0
C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
@T.prim_func
def element_wise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def rowsum(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 16):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, T.floordiv(k * k, 2))
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_dominant(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi, vk] = 0.0
B[vi, vk] = B[vi, vk] + A[vi, vk]
@T.prim_func
def rowsum_not_serial(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i in T.serial(0, 128):
for k in T.parallel(0, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_wrong_reduce_pattern1(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 1.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_wrong_reduce_pattern2(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] - A[vi, vk]
@T.prim_func
def rowsum_init_not_bufferstore(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for i, k in T.grid(128, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
v_init: T.float32 = T.float32(0)
B[vi] = v_init
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_transformed(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128,))
for io, ii_ko_fused, ki in T.grid(32, 128, 4):
with T.block("B"):
vi = T.axis.S(128, io * 4 + T.floordiv(ii_ko_fused, 32))
vk = T.axis.R(128, T.floormod(ii_ko_fused, 32) * 4 + ki)
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_zero_dim(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128])
B = T.match_buffer(b, [])
for k0 in range(128):
with T.block("B"):
k = T.axis.R(128, k0)
with T.init():
B[()] = 0.0
B[()] = B[()] + A[k]
@T.prim_func
def rowsum_zero_dim_rfactor(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128])
B = T.match_buffer(b, [])
B_rf = T.alloc_buffer([128])
for i in range(128):
with T.block("B_rf"):
vi0 = T.axis.S(128, i)
B_rf[vi0] = A[vi0]
for i in range(128):
with T.block("B"):
vi0_1 = T.axis.R(128, i)
with T.init():
B[()] = 0.0
B[()] = B[()] + B_rf[vi0_1]
@T.prim_func
def rowsum_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, k_0, k_1 in T.grid(128, 13, 10):
with T.block("B"):
T.where(k_0 * 10 + k_1 < 128)
vi = T.axis.S(128, i)
vk = T.axis.R(128, k_0 * 10 + k_1)
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_predicate_rfactor(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
B_rf = T.alloc_buffer([128, 13], dtype="float32")
for i, k_0, k_1 in T.grid(128, 13, 10):
with T.block("B_rf"):
vk_0, vi, vk_1 = T.axis.remap("SSR", [k_0, i, k_1])
T.where(k_0 * 10 + k_1 < 128)
with T.init():
B_rf[vi, vk_0] = T.float32(0)
B_rf[vi, vk_0] = B_rf[vi, vk_0] + A[vi, vk_0 * 10 + vk_1]
for i, k_0 in T.grid(128, 13):
with T.block("B"):
vk_0, vi = T.axis.remap("RS", [k_0, i])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + B_rf[vi, vk_0]
@T.prim_func
def multiple_reduction_blocks(a: T.handle, f: T.handle) -> None:
A = T.match_buffer(a, (16, 16, 16))
C = T.alloc_buffer((16, 16))
D = T.alloc_buffer((16, 16))
E = T.alloc_buffer((16, 16))
F = T.match_buffer(f, (16, 16))
for i in T.serial(0, 16):
for j1 in T.serial(0, 16):
for k1o, k1i in T.grid(4, 4):
with T.block("C"):
ci, cj = T.axis.remap("SS", [i, j1])
ck = T.axis.R(16, k1o * 4 + k1i)
with T.init():
C[ci, cj] = 0.0
C[ci, cj] = C[ci, cj] + A[ci, cj, ck]
for k2o, k2i in T.grid(4, 4):
with T.block("D"):
di, dj = T.axis.remap("SS", [i, j1])
dk = T.axis.R(16, k2o * 4 + k2i)
with T.init():
D[di, dj] = 0.0
D[di, dj] = D[di, dj] + A[di, dj, dk] + C[di, dj]
for j2 in T.serial(0, 16):
for k3o, k3i in T.grid(4, 4):
with T.block("E"):
ei, ej = T.axis.remap("SS", [i, j2])
ek = T.axis.R(16, k3o * 4 + k3i)
with T.init():
E[ei, ej] = 0.0
E[ei, ej] = E[ei, ej] + A[ei, ej, ek] + D[ei, ej]
for k4o, k4i in T.grid(4, 4):
with T.block("F"):
fi, fj = T.axis.remap("SS", [i, j2])
fk = T.axis.R(16, k4o * 4 + k4i)
with T.init():
F[fi, fj] = 0.0
F[fi, fj] = F[fi, fj] + A[fi, fj, fk] + E[fi, fj]
@T.prim_func
def multiple_reduction_blocks_rfactor(a: T.handle, f: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16])
C = T.alloc_buffer([16, 16])
D = T.alloc_buffer([16, 16])
E = T.alloc_buffer([16, 16])
F = T.match_buffer(f, [16, 16])
C_rf = T.alloc_buffer([16, 16, 4])
for i, j1, k1o, k1i in T.grid(16, 16, 4, 4):
with T.block("C_rf"):
vk1o, ci, cj, vk1i = T.axis.remap("SSSR", [k1o, i, j1, k1i])
with T.init():
C_rf[ci, cj, vk1o] = 0.0
C_rf[ci, cj, vk1o] = C_rf[ci, cj, vk1o] + A[ci, cj, ((vk1o * 4) + vk1i)]
for i_1 in T.serial(0, 16):
for j1_1 in T.serial(0, 16):
for k1o_1 in T.serial(0, 4):
with T.block("C"):
vk1o_1, ci_1, cj_1 = T.axis.remap("RSS", [k1o_1, i_1, j1_1])
with T.init():
C[ci_1, cj_1] = 0.0
C[ci_1, cj_1] = C[ci_1, cj_1] + C_rf[ci_1, cj_1, vk1o_1]
for k2o, k2i in T.grid(4, 4):
with T.block("D"):
di, dj = T.axis.remap("SS", [i_1, j1_1])
dk = T.axis.R(16, k2o * 4 + k2i)
with T.init():
D[di, dj] = 0.0
D[di, dj] = (D[di, dj] + A[di, dj, dk]) + C[di, dj]
for j2 in T.serial(0, 16):
for k3o, k3i in T.grid(4, 4):
with T.block("E"):
ei, ej = T.axis.remap("SS", [i_1, j2])
ek = T.axis.R(16, k3o * 4 + k3i)
with T.init():
E[ei, ej] = 0.0
E[ei, ej] = (E[ei, ej] + A[ei, ej, ek]) + D[ei, ej]
for k4o, k4i in T.grid(4, 4):
with T.block("F"):
fi, fj = T.axis.remap("SS", [i_1, j2])
fk = T.axis.R(16, k4o * 4 + k4i)
with T.init():
F[fi, fj] = 0.0
F[fi, fj] = (F[fi, fj] + A[fi, fj, fk]) + E[fi, fj]
@T.prim_func
def rfactor_spatial_only(
A: T.Buffer[(1, 512, 7, 7), "float32"],
B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("acc"):
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1)
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
rv0 = T.axis.reduce(7, i4 // 7)
rv1 = T.axis.reduce(7, i4 % 7)
T.reads(A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1])
T.writes(B[ax0, ax1, ax2, ax3])
with T.init():
B[ax0, ax1, ax2, ax3] = T.float32(0)
B[ax0, ax1, ax2, ax3] = (
B[ax0, ax1, ax2, ax3] + A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1]
)
@T.prim_func
def rfactor_spatial_only_after(
A: T.Buffer[(1, 512, 7, 7), "float32"],
B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
# body
# with T.block("root")
B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("acc_rf"):
vi4 = T.axis.spatial(49, i4)
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1)
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
B_rf[ax0, ax1, ax2, ax3, vi4] = A[ax0, ax1, ax2 * 7 + vi4 // 7, ax3 * 7 + vi4 % 7]
for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("acc"):
vi4 = T.axis.reduce(49, i4)
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1)
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
with T.init():
B[ax0, ax1, ax2, ax3] = T.float32(0)
B[ax0, ax1, ax2, ax3] = B[ax0, ax1, ax2, ax3] + B_rf[ax0, ax1, ax2, ax3, vi4]
@T.prim_func
def argmax_split(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmin_split_init_update_reordered(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmin_v0: T.Buffer[(128,), "int32"],
argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmin"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmin_v0[i], argmin_v1[i])
with T.init():
argmin_v1[i] = T.max_value("float32")
argmin_v0[i] = -1
v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
v_argmin_v1: T.float32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k])
argmin_v1[i] = v_argmin_v1
argmin_v0[i] = v_argmin_v0
@T.prim_func
def argmax_split_different_shape(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(256,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_different_indices(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i + 1] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i + 1] = v_argmax_v1
@T.prim_func
def argmax_split_init_not_bufferstore(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
v1_init: T.float32 = T.min_value("float32")
argmax_v1[i] = v1_init
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_init_buffer_duplicate(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v0[i] = -1
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_letstmt_fewer_than_init(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
@T.prim_func
def argmax_split_letstmt_more_than_init(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_let_body_neither_seqstmt_nor_bufferstore(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
T.evaluate(0)
@T.prim_func
def argmax_split_init_update_inconsistent_bufferstore_number(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_body_seq_not_bufferstore(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
T.evaluate(0)
@T.prim_func
def argmax_split_body_bufferstore_value_not_var(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_body_bufferstore_value_unbound_var(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
v_unbound = T.var("int32")
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_unbound
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_one_let_var_used_multi_times(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "int32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "int32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("int32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v0
@T.prim_func
def argmax_split_body_one_buffer_updated_multi_times(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "int32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "int32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("int32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v0[i] = v_argmax_v1
@T.prim_func
def argmax_split_init_buffer_not_match(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v0_1: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v0_1[i], argmax_v1[i])
with T.init():
argmax_v0_1[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_rfactor(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
argmax_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
argmax_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax_rf"):
vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
T.writes(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
with T.init():
argmax_v0_rf[i, vi1_1] = -1
argmax_v1_rf[i, vi1_1] = T.min_value("float32")
v_argmax_v0_rf: T.int32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
argmax_v0_rf[i, vi1_1],
idx[i, vi1_0 * 32 + vi1_1],
)
v_argmax_v1_rf: T.float32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
argmax_v1_rf[i, vi1_1],
val[i, vi1_0 * 32 + vi1_1],
)
argmax_v0_rf[i, vi1_1] = v_argmax_v0_rf
argmax_v1_rf[i, vi1_1] = v_argmax_v1_rf
for i0, i1_1 in T.grid(128, 32):
with T.block("argmax"):
vi1_1, i = T.axis.remap("RS", [i1_1, i0])
T.reads(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v0[i], argmax_v0_rf[i, vi1_1]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v1[i], argmax_v1_rf[i, vi1_1]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmin_split_rfactor(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmin_v0: T.Buffer[(128,), "int32"],
argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
argmin_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
argmin_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmin_rf"):
vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
T.writes(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
with T.init():
argmin_v0_rf[i, vi1_1] = -1
argmin_v1_rf[i, vi1_1] = T.max_value("float32")
v_argmin_v0_rf: T.int32 = T.Select(
argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
argmin_v0_rf[i, vi1_1],
idx[i, vi1_0 * 32 + vi1_1],
)
v_argmin_v1_rf: T.float32 = T.Select(
argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
argmin_v1_rf[i, vi1_1],
val[i, vi1_0 * 32 + vi1_1],
)
argmin_v0_rf[i, vi1_1] = v_argmin_v0_rf
argmin_v1_rf[i, vi1_1] = v_argmin_v1_rf
for i0, i1_1 in T.grid(128, 32):
with T.block("argmin"):
vi1_1, i = T.axis.remap("RS", [i1_1, i0])
T.reads(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
T.writes(argmin_v0[i], argmin_v1[i])
with T.init():
argmin_v0[i] = -1
argmin_v1[i] = T.max_value("float32")
v_argmin_v0: T.int32 = T.Select(
argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v0[i], argmin_v0_rf[i, vi1_1]
)
v_argmin_v1: T.float32 = T.Select(
argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v1[i], argmin_v1_rf[i, vi1_1]
)
argmin_v0[i] = v_argmin_v0
argmin_v1[i] = v_argmin_v1
@T.prim_func
def argmax_topi_rfactor(
placeholder: T.Buffer[(1, 32), "int32"], placeholder_red: T.Buffer[1, "int32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
for i0, i1_0, i1_1 in T.grid(1, 4, 8):
with T.block("placeholder_red_temp_rf"):
vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
with T.init():
placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
placeholder_red_temp_v1_rf[ax0, vi1_1] = -2147483648
v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1]
or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
placeholder_red_temp_v0_rf[ax0, vi1_1],
vi1_0 * 8 + vi1_1,
)
v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1],
placeholder_red_temp_v1_rf[ax0, vi1_1],
placeholder[ax0, vi1_0 * 8 + vi1_1],
)
placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
for i0, i1_1 in T.grid(1, 8):
with T.block("placeholder_red_temp"):
vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
with T.init():
placeholder_red_temp_v0[ax0] = -1
placeholder_red_temp_v1[ax0] = -2147483648
v_placeholder_red_temp_v0: T.int32 = T.Select(
placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1]
or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
placeholder_red_temp_v0[ax0],
placeholder_red_temp_v0_rf[ax0, vi1_1],
)
v_placeholder_red_temp_v1: T.int32 = T.Select(
placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1],
placeholder_red_temp_v1[ax0],
placeholder_red_temp_v1_rf[ax0, vi1_1],
)
placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
for i0 in T.serial(1):
with T.block("placeholder_red"):
ax0 = T.axis.spatial(1, i0)
T.reads(placeholder_red_temp_v0[ax0])
T.writes(placeholder_red[ax0])
placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
@T.prim_func
def argmin_topi_rfactor(
placeholder: T.Buffer[(1, 32), "int32"], placeholder_red: T.Buffer[1, "int32"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
for i0, i1_0, i1_1 in T.grid(1, 4, 8):
with T.block("placeholder_red_temp_rf"):
vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
with T.init():
placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
placeholder_red_temp_v1_rf[ax0, vi1_1] = 2147483647
v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1]
or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
placeholder_red_temp_v0_rf[ax0, vi1_1],
vi1_0 * 8 + vi1_1,
)
v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1],
placeholder_red_temp_v1_rf[ax0, vi1_1],
placeholder[ax0, vi1_0 * 8 + vi1_1],
)
placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
for i0, i1_1 in T.grid(1, 8):
with T.block("placeholder_red_temp"):
vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
with T.init():
placeholder_red_temp_v0[ax0] = -1
placeholder_red_temp_v1[ax0] = 2147483647
v_placeholder_red_temp_v0: T.int32 = T.Select(
placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1]
or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
placeholder_red_temp_v0[ax0],
placeholder_red_temp_v0_rf[ax0, vi1_1],
)
v_placeholder_red_temp_v1: T.int32 = T.Select(
placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1],
placeholder_red_temp_v1[ax0],
placeholder_red_temp_v1_rf[ax0, vi1_1],
)
placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
for i0 in T.serial(1):
with T.block("placeholder_red"):
ax0 = T.axis.spatial(1, i0)
T.reads(placeholder_red_temp_v0[ax0])
T.writes(placeholder_red[ax0])
placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
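# The tests below exercise tir.Schedule.rfactor. Given a reduction block and
# one of its reduction loops, rfactor(loop, factor_axis) splits the
# computation in two: an "_rf" block accumulates partial results with the
# chosen loop promoted to a spatial axis (inserted at position `factor_axis`
# of the intermediate buffer), and a follow-up block reduces over that axis.
# Minimal usage sketch (names illustrative):
#
#     sch = tir.Schedule(mod)
#     blk = sch.get_block("update")
#     *_, k_inner = sch.get_loops(blk)
#     rf_blk = sch.rfactor(k_inner, factor_axis=0)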
def test_reduction_rfactor_matmul():
s = tir.Schedule(transformed_matmul, debug_mask="all")
update = s.get_block("update")
_, _, _, _, kii = s.get_loops(update)
rf_block = s.rfactor(kii, 0)
tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
assert s.get(update).same_as(s.get(s.get_block("update")))
verify_trace_roundtrip(s, mod=transformed_matmul)
def test_reduction_rfactor_matmul_with_let():
s = tir.Schedule(transformed_matmul_with_let, debug_mask="all")
update = s.get_block("update")
_, _, _, _, kii = s.get_loops(update)
rf_block = s.rfactor(kii, 0)
tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
assert s.get(update).same_as(s.get(s.get_block("update")))
verify_trace_roundtrip(s, mod=transformed_matmul_with_let)
def test_reduction_rfactor_square_sum():
s = tir.Schedule(square_sum, debug_mask="all")
C = s.get_block("C")
_, _, j = s.get_loops(C)
rf_block = s.rfactor(j, 1)
tvm.ir.assert_structural_equal(s.mod["main"], square_sum_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
assert s.get(C).same_as(s.get(s.get_block("C")))
verify_trace_roundtrip(s, mod=square_sum)
def test_reduction_rfactor_square_sum_square_root():
s = tir.Schedule(transformed_square_sum_square_root, debug_mask="all")
C = s.get_block("C")
_, _, f_i = s.get_loops(C)
rf_block = s.rfactor(f_i, 0)
tvm.ir.assert_structural_equal(s.mod["main"], square_sum_square_root_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
assert s.get(C).same_as(s.get(s.get_block("C")))
verify_trace_roundtrip(s, mod=transformed_square_sum_square_root)
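# Most of the tests below exercise failure modes: each feeds rfactor an
# invalid fixture or loop and expects tvm.tir.ScheduleError to be raised.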
def test_reduction_rfactor_loop_multiple_children():
s = tir.Schedule(matmul_loop_multiple_children, debug_mask="all")
k, _, _ = s.get_loops(s.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_not_stage_pipeline():
s = tir.Schedule(matmul_not_stage_pipeline, debug_mask="all")
_, _, k = s.get_loops(s.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_not_reduction_block1():
s = tir.Schedule(element_wise, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(i, 0)
def test_reduction_rfactor_not_reduction_block2():
s = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_not_reduction_block3():
s = tir.Schedule(rowsum_not_dominant, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_not_serial_loop():
s = tir.Schedule(rowsum_not_serial, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_not_same_buffer_access():
s = tir.Schedule(matmul_not_same_buffer_access, debug_mask="all")
_, _, k = s.get_loops(s.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_factor_axis_range_fail():
s = tir.Schedule(transformed_matmul, debug_mask="all")
_, _, _, _, kii = s.get_loops(s.get_block("update"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(kii, 3)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(kii, -4)
def test_reduction_rfactor_factor_axis_range():
s = tir.Schedule(transformed_matmul, debug_mask="all")
update = s.get_block("update")
_, _, _, _, kii = s.get_loops(update)
rf_block = s.rfactor(kii, -3)
tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
assert s.get(update).same_as(s.get(s.get_block("update")))
verify_trace_roundtrip(s, mod=transformed_matmul)
def test_reduction_rfactor_wrong_reduce_pattern1():
s = tir.Schedule(rowsum_wrong_reduce_pattern1, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_wrong_reduce_pattern2():
s = tir.Schedule(rowsum_wrong_reduce_pattern2, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_init_not_bufferstore():
s = tir.Schedule(rowsum_init_not_bufferstore, debug_mask="all")
_, k = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k, 0)
def test_reduction_rfactor_wrong_loops1():
s = tir.Schedule(rowsum, debug_mask="all")
i, _ = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(i, 0)
def test_reduction_rfactor_wrong_loops2():
s = tir.Schedule(rowsum_transformed, debug_mask="all")
_, _, k_i = s.get_loops(s.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k_i, 0)
def test_reduction_rfactor_zero_dim():
s = tir.Schedule(rowsum_zero_dim, debug_mask="all")
B = s.get_block("B")
(k,) = s.get_loops(B)
rf_block = s.rfactor(k, 0)
tvm.ir.assert_structural_equal(s.mod["main"], rowsum_zero_dim_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("B_rf")))
assert s.get(B).same_as(s.get(s.get_block("B")))
verify_trace_roundtrip(s, mod=rowsum_zero_dim)
def test_reduction_rfactor_outermost_loop_multiple_children_fail(): # pylint: disable=invalid-name
s = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
_, _, k2o, k2i = s.get_loops(s.get_block("D"))
_, _, k3o, k3i = s.get_loops(s.get_block("E"))
_, _, k4o, k4i = s.get_loops(s.get_block("F"))
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k2o, 0)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k2i, 0)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k3o, 0)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k3i, 0)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k4o, 0)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(k4i, 0)
def test_reduction_rfactor_outermost_loop_multiple_children(): # pylint: disable=invalid-name
s = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
C = s.get_block("C")
_, _, k1o, _ = s.get_loops(C)
rf_block = s.rfactor(k1o, 2)
tvm.ir.assert_structural_equal(s.mod["main"], multiple_reduction_blocks_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
assert s.get(C).same_as(s.get(s.get_block("C")))
verify_trace_roundtrip(s, mod=multiple_reduction_blocks)
def test_reduction_rfactor_predicate(): # pylint: disable=invalid-name
s = tir.Schedule(rowsum_predicate, debug_mask="all")
B = s.get_block("B")
_, ko, _ = s.get_loops(B)
    # TODO: this should raise the more specific tvm.tir.ScheduleError
    with pytest.raises(tvm.TVMError):
        s.rfactor(ko, 1)
def test_reduction_rfactor_with_annotation():
s = tir.Schedule(square_sum_with_annotation, debug_mask="all")
C = s.get_block("C")
_, _, j = s.get_loops(C)
rf_block = s.rfactor(j, 1)
tvm.ir.assert_structural_equal(s.mod["main"], square_sum_with_annotation_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
assert s.get(C).same_as(s.get(s.get_block("C")))
verify_trace_roundtrip(s, mod=square_sum_with_annotation)
def test_reduction_rfactor_spatial_only():
s = tir.Schedule(rfactor_spatial_only, debug_mask="all")
block = s.get_block(name="acc", func_name="main")
_, _, _, _, loop, _ = s.get_loops(block)
rf_block = s.rfactor(loop=loop, factor_axis=4)
tvm.ir.assert_structural_equal(s.mod["main"], rfactor_spatial_only_after)
assert s.get(rf_block).same_as(s.get(s.get_block("acc_rf")))
assert s.get(block).same_as(s.get(s.get_block("acc")))
verify_trace_roundtrip(s, mod=rfactor_spatial_only)
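# rfactor also supports multi-value (tuple) reductions such as argmax/argmin,
# whose init and update bodies consist of several BufferStores. The tests
# below cover the valid cases as well as the many argmax_split_* fixtures
# that are intentionally malformed and must be rejected with a ScheduleError.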
def test_reduction_rfactor_argmax():
s = tir.Schedule(argmax_split, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
rf_block = s.rfactor(ki, 1)
tvm.ir.assert_structural_equal(s.mod["main"], argmax_split_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("argmax_rf")))
assert s.get(argmax).same_as(s.get(s.get_block("argmax")))
verify_trace_roundtrip(s, mod=argmax_split)
def test_reduction_rfactor_argmin_init_update_reordered():
s = tir.Schedule(argmin_split_init_update_reordered, debug_mask="all")
argmin = s.get_block("argmin")
_, _, ki = s.get_loops(argmin)
rf_block = s.rfactor(ki, 1)
tvm.ir.assert_structural_equal(s.mod["main"], argmin_split_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("argmin_rf")))
assert s.get(argmin).same_as(s.get(s.get_block("argmin")))
verify_trace_roundtrip(s, mod=argmin_split_init_update_reordered)
def test_reduction_rfactor_argmax_reduction_buffer_different_shape():
s = tir.Schedule(argmax_split_different_shape, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_different_access_indices():
s = tir.Schedule(argmax_split_different_indices, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_not_bufferstore():
s = tir.Schedule(argmax_split_init_not_bufferstore, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_buffer_duplicate():
s = tir.Schedule(argmax_split_init_buffer_duplicate, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_letstmt_fewer_than_init():
s = tir.Schedule(argmax_split_letstmt_fewer_than_init, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_letstmt_more_than_init():
s = tir.Schedule(argmax_split_letstmt_more_than_init, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_let_body_neither_seqstmt_nor_bufferstore():
s = tir.Schedule(argmax_split_let_body_neither_seqstmt_nor_bufferstore, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_update_inconsistent_bufferstore_number():
s = tir.Schedule(argmax_split_init_update_inconsistent_bufferstore_number, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_seq_not_bufferstore():
s = tir.Schedule(argmax_split_body_seq_not_bufferstore, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_bufferstore_value_not_var():
s = tir.Schedule(argmax_split_body_bufferstore_value_not_var, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_bufferstore_value_unbound_var():
s = tir.Schedule(argmax_split_body_bufferstore_value_unbound_var, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_one_let_var_used_multi_times():
s = tir.Schedule(argmax_split_one_let_var_used_multi_times, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_one_buffer_updated_multi_times():
s = tir.Schedule(argmax_split_body_one_buffer_updated_multi_times, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_buffer_not_match():
s = tir.Schedule(argmax_split_init_buffer_not_match, debug_mask="all")
argmax = s.get_block("argmax")
_, _, ki = s.get_loops(argmax)
with pytest.raises(tvm.tir.ScheduleError):
s.rfactor(ki, 1)
def test_reduction_rfactor_topi_argmax():
A = te.placeholder((1, 32), dtype="int32")
B = topi.argmax(A, axis=1)
argmax_topi = te.create_prim_func([A, B])
s = tir.Schedule(argmax_topi, debug_mask="all")
argmax = s.get_block("placeholder_red_temp")
_, k = s.get_loops(argmax)
_, ki = s.split(k, [None, 8])
rf_block = s.rfactor(ki, 1)
tvm.ir.assert_structural_equal(s.mod["main"], argmax_topi_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("placeholder_red_temp_rf")))
assert s.get(argmax).same_as(s.get(s.get_block("placeholder_red_temp")))
verify_trace_roundtrip(s, mod=argmax_topi)
def test_reduction_rfactor_topi_argmin():
A = te.placeholder((1, 32), dtype="int32")
B = topi.argmin(A, axis=1)
argmin_topi = te.create_prim_func([A, B])
s = tir.Schedule(argmin_topi, debug_mask="all")
argmin = s.get_block("placeholder_red_temp")
_, k = s.get_loops(argmin)
_, ki = s.split(k, [None, 8])
rf_block = s.rfactor(ki, 1)
tvm.ir.assert_structural_equal(s.mod["main"], argmin_topi_rfactor)
assert s.get(rf_block).same_as(s.get(s.get_block("placeholder_red_temp_rf")))
assert s.get(argmin).same_as(s.get(s.get_block("placeholder_red_temp")))
verify_trace_roundtrip(s, mod=argmin_topi)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_rolling_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import numpy as np
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
import pytest
def check_rolling_buffer(
sch: tir.Schedule, origin: tir.PrimFunc, expected: tir.PrimFunc, check_run=False
):
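    """Assert the scheduled func matches `expected`, verify the schedule trace
    round-trips, and optionally compile both funcs and compare their outputs
    on random input data."""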
scheduled = sch.mod["main"]
tvm.ir.assert_structural_equal(scheduled, expected)
verify_trace_roundtrip(sch, origin)
if check_run:
in_buffer = origin.buffer_map[origin.params[0]]
out_buffer = origin.buffer_map[origin.params[1]]
in_shape = [int(_) for _ in in_buffer.shape]
out_shape = [int(_) for _ in out_buffer.shape]
x = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buffer.dtype))
y0 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
y1 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
f_origin = tvm.build(origin)
f_scheduled = tvm.build(scheduled)
f_origin(x, y0)
f_scheduled(x, y1)
tvm.testing.assert_allclose(y0.numpy(), y1.numpy())
def _tile_nd(s, tile, block_name):
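    """Tile every loop of `block_name` by the given sizes and reorder so all
    outer loops precede all inner loops; returns (outer, inner) loop RVs."""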
outer_indices = []
inner_indices = []
block = s.get_block(block_name)
loops = s.get_loops(block)
for i, size in enumerate(tile):
outer, inner = s.split(loops[i], [None, size])
outer_indices.append(outer)
inner_indices.append(inner)
s.reorder(*outer_indices, *inner_indices)
return outer_indices, inner_indices
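# tir.Schedule.rolling_buffer(block, write_buffer_index) shrinks the chosen
# write buffer along its rolling dimension and rewrites accesses modulo the
# window size, so overlapping rows computed by one tile are reused by the
# next tile instead of being recomputed. Typical flow (sketch, mirroring the
# test below):
#
#     io, _ = sch.split(i, [2, 4])
#     sch.compute_at(sch.get_block("B"), io)
#     sch.rolling_buffer(sch.get_block("B"), 0)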
def test_1d_rolling_buffer():
@T.prim_func
def before(A: T.Buffer[(4, 12), "int32"], C: T.Buffer[(4, 8), "int32"]):
B = T.alloc_buffer((4, 10), "int32")
for c in T.serial(4):
for i in T.serial(0, 10):
for k in T.serial(3):
with T.block("B"):
cc, vi, vk = T.axis.remap("SSR", [c, i, k])
with T.init():
B[cc, vi] = 0
B[cc, vi] = B[cc, vi] + A[cc, vi + vk]
for i in T.serial(0, 8):
for k in T.serial(3):
with T.block("C"):
cc, vi, vk = T.axis.remap("SSR", [c, i, k])
with T.init():
C[cc, vi] = 0
C[cc, vi] = C[cc, vi] + B[cc, vi + vk]
@T.prim_func
def expected(A: T.Buffer[(4, 12), "int32"], C: T.Buffer[(4, 8), "int32"]):
B = T.alloc_buffer([4, 6], dtype="int32")
for c, i_0 in T.grid(4, 2):
for ax0, ax1 in T.grid(6, 3):
with T.block("B"):
T.where(i_0 < 1 or 2 <= ax0)
cc = T.axis.spatial(4, c)
vi = T.axis.opaque(10, i_0 * 4 + ax0)
vk = T.axis.reduce(3, ax1)
T.reads(A[cc, vi + vk])
T.writes(B[cc, vi % 6])
with T.init():
B[cc, vi % 6] = 0
B[cc, vi % 6] = B[cc, vi % 6] + A[cc, vi + vk]
for i_1, k in T.grid(4, 3):
with T.block("C"):
cc = T.axis.spatial(4, c)
vi = T.axis.opaque(8, i_0 * 4 + i_1)
vk = T.axis.reduce(3, k)
T.reads(B[cc, (vi + vk) % 6])
T.writes(C[cc, vi])
with T.init():
C[cc, vi] = 0
C[cc, vi] = C[cc, vi] + B[cc, (vi + vk) % 6]
sch = tir.Schedule(before, debug_mask="all")
_, i, _ = sch.get_loops(sch.get_block("C"))
io, _ = sch.split(i, [2, 4])
sch.compute_at(sch.get_block("B"), io)
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, before, expected, check_run=True)
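# Shared fixtures: a two-stage cascade of 3x3 max pools, and a three-stage
# variant whose middle pool has stride 2.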
@T.prim_func
def cascade_2_max_pool2d(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
B = T.alloc_buffer([1, 10, 10, 16], dtype="int8")
for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3):
with T.block("B"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
B[ax0, ax1, ax2, ax3] = T.int8(-128)
B[ax0, ax1, ax2, ax3] = T.max(B[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3])
for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3):
with T.block("C"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3])
@T.prim_func
def cascade_3_max_pool2d_with_stride(
A: T.Buffer[(1, 24, 24, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]
):
B_0 = T.alloc_buffer([1, 22, 22, 16], dtype="int8")
B_1 = T.alloc_buffer([1, 10, 10, 16], dtype="int8")
for i0, i1, i2, i3, i4, i5 in T.grid(1, 22, 22, 16, 3, 3):
with T.block("B_0"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
B_0[ax0, ax1, ax2, ax3] = T.int8(-128)
B_0[ax0, ax1, ax2, ax3] = T.max(
B_0[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3]
)
for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3):
with T.block("B_1"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
B_1[ax0, ax1, ax2, ax3] = T.int8(-128)
B_1[ax0, ax1, ax2, ax3] = T.max(
B_1[ax0, ax1, ax2, ax3], B_0[ax0, ax1 * 2 + rv0, ax2 * 2 + rv1, ax3]
)
for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3):
with T.block("C"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B_1[ax0, ax1 + rv0, ax2 + rv1, ax3]
)
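# The tests below tile the consumer along different axes, compute the producer
# at the tile loop, and convert the intermediate buffer into a rolling buffer.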
def test_cascade_max_pool2d_w_tiled():
@T.prim_func
def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
B = T.alloc_buffer([1, 10, 6, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 2, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(10, 6, 16, 3, 3):
with T.block("B"):
T.where(i2_0 < 1 or 2 <= ax1)
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(10, ax0)
ax2_1 = T.axis.opaque(10, i2_0 * 4 + ax1)
ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1, ax2_1 % 6, ax3_1])
with T.init():
B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.max(
B[ax0_1, ax1_1, ax2_1 % 6, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 8, 4, 16, 3, 3):
with T.block("C"):
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.spatial(8, i1_0 * 8 + i1_1)
ax2 = T.axis.opaque(8, i2_0 * 4 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3]
)
sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
oi, _ = _tile_nd(sch, [1, 8, 4, 16], "C")
sch.compute_at(sch.get_block("B"), oi[-1])
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_h_tiled():
@T.prim_func
def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
B = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 1, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 10, 16, 3, 3):
with T.block("B"):
T.where(i1_0 < 1 or 2 <= ax0)
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0)
ax2_1 = T.axis.spatial(10, ax1)
ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max(
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 8, 16, 3, 3):
with T.block("C"):
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1)
ax2 = T.axis.spatial(8, i2_0 * 8 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]
)
sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
io, _ = _tile_nd(sch, [1, 4, 8, 16], "C")
sch.compute_at(sch.get_block("B"), io[-1])
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_h_w_c_tiled():
@T.prim_func
def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
B = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 2):
for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 8, 3, 3):
with T.block("B"):
T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1))
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0)
ax2_1 = T.axis.spatial(10, i2_0 * 4 + ax1)
ax3_1 = T.axis.spatial(16, i3_0 * 8 + ax2)
rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max(
B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 8, 3, 3):
with T.block("C"):
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1)
ax2 = T.axis.spatial(8, i2_0 * 4 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 8 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]
)
sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
io, _ = _tile_nd(sch, [1, 4, 4, 8], "C")
sch.compute_at(sch.get_block("B"), io[-1])
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_non_perfect_tiled():
@T.prim_func
def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]) -> None:
B = T.alloc_buffer([1, 8, 10, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(8, 8, 16, 3, 3):
with T.block("B"):
T.where(
i1_0 * 6 + ax0 < 10
and i2_0 * 6 + ax1 < 10
and (i1_0 < 1 or 2 <= ax0)
and (i2_0 < 1 or 2 <= ax1)
)
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.opaque(10, i1_0 * 6 + ax0)
ax2_1 = T.axis.spatial(10, i2_0 * 6 + ax1)
ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1 % 8, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.max(
B[ax0_1, ax1_1 % 8, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 6, 6, 16, 3, 3):
with T.block("C"):
T.where(i1_0 * 6 + i1_1 < 8 and i2_0 * 6 + i2_1 < 8)
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.opaque(8, i1_0 * 6 + i1_1)
ax2 = T.axis.spatial(8, i2_0 * 6 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3]
)
sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
io, _ = _tile_nd(sch, [1, 6, 6, 16], "C")
sch.compute_at(sch.get_block("B"), io[-1])
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_3_max_pool2d_with_stride():
@T.prim_func
def expected(A: T.Buffer[(1, 24, 24, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]) -> None:
B_0 = T.alloc_buffer([1, 13, 22, 16], dtype="int8")
B_1 = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(13, 13, 16, 3, 3):
with T.block("B_0"):
T.where((i1_0 < 1 or 5 <= ax0) and (i2_0 < 1 or 5 <= ax1))
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.opaque(22, i1_0 * 8 + ax0)
ax2_1 = T.axis.spatial(22, i2_0 * 8 + ax1)
ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1])
with T.init():
B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.int8(-128)
B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.max(
B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1],
A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1],
)
for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 16, 3, 3):
with T.block("B_1"):
T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1))
ax0_2 = T.axis.spatial(1, 0)
ax1_2 = T.axis.opaque(10, i1_0 * 4 + ax0)
ax2_2 = T.axis.spatial(10, i2_0 * 4 + ax1)
ax3_2, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
T.reads(B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2])
T.writes(B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2])
with T.init():
B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.int8(-128)
B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.max(
B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2],
B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2],
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 3, 3):
with T.block("C"):
ax0_3 = T.axis.spatial(1, i0_0 + i0_1)
ax1_3 = T.axis.opaque(8, i1_0 * 4 + i1_1)
ax2_3 = T.axis.spatial(8, i2_0 * 4 + i2_1)
ax3_3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3])
T.writes(C[ax0_3, ax1_3, ax2_3, ax3_3])
with T.init():
C[ax0_3, ax1_3, ax2_3, ax3_3] = T.int8(-128)
C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max(
C[ax0_3, ax1_3, ax2_3, ax3_3],
B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3],
)
sch = tir.Schedule(cascade_3_max_pool2d_with_stride, debug_mask="all")
io, _ = _tile_nd(sch, [1, 4, 4, 16], "C")
sch.compute_at(sch.get_block("B_1"), io[-1])
sch.compute_at(sch.get_block("B_0"), io[-1])
sch.rolling_buffer(sch.get_block("B_0"), 0)
sch.rolling_buffer(sch.get_block("B_1"), 0)
check_rolling_buffer(sch, cascade_3_max_pool2d_with_stride, expected, check_run=True)
def test_upscale():
@T.prim_func
def before(A: T.Buffer[(1, 16, 16, 16), "int8"], C: T.Buffer[(1, 24, 24, 16), "int8"]) -> None:
B = T.alloc_buffer([1, 14, 14, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3):
with T.block("B"):
T.where(i1_0 * 5 // 2 + ax0 < 14 and i2_0 * 5 // 2 + ax1 < 14)
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(14, i1_0 * 5 // 2 + ax0)
ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1)
ax3_1 = T.axis.spatial(16, ax2)
rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.max(
B[ax0_1, ax1_1, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3):
with T.block("C"):
T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24)
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.spatial(24, i1_0 * 5 + i1_1)
ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3]
)
@T.prim_func
def expected(
A: T.Buffer[(1, 16, 16, 16), "int8"], C: T.Buffer[(1, 24, 24, 16), "int8"]
) -> None:
B = T.alloc_buffer([1, 5, 14, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1):
for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3):
with T.block("B"):
T.where(
i1_0 * 5 // 2 + ax0 < 14
and i2_0 * 5 // 2 + ax1 < 14
and (i1_0 < 1 or 2 <= ax0)
and (i2_0 < 1 or 2 <= ax1)
)
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.opaque(14, i1_0 * 5 // 2 + ax0)
ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1)
ax3_1 = T.axis.spatial(16, ax2)
rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1 % 5, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.max(
B[ax0_1, ax1_1 % 5, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
)
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3):
with T.block("C"):
T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24)
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.opaque(24, i1_0 * 5 + i1_1)
ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3]
)
sch = tir.Schedule(before, debug_mask="all")
sch.rolling_buffer(sch.get_block("B"), 0)
check_rolling_buffer(sch, before, expected, check_run=True)
def test_fail_rolling_buffer_multi_writers():
@T.prim_func
def func_multi_writers(
A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 12, 12, 16), "int8"]
):
B = T.alloc_buffer([1, 12, 12, 16], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 3, 3, 1):
for ax0, ax1, ax2 in T.grid(6, 6, 16):
with T.block("B_writer_0"):
ax0_1 = T.axis.spatial(1, i0)
ax1_1 = T.axis.spatial(12, i1 * 4 + ax0)
ax2_1 = T.axis.spatial(12, i2 * 4 + ax1)
ax3_1 = T.axis.spatial(16, ax2)
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128)
B[ax0_1, ax1_1, ax2_1, ax3_1] = A[ax0_1, ax1_1, ax2_1, ax3_1] + T.int8(1)
for ax0, ax1, ax2 in T.grid(6, 6, 16):
with T.block("B_writer_1"):
ax0_2 = T.axis.spatial(1, i0)
ax1_2 = T.axis.spatial(12, i1 * 4 + ax0)
ax2_2 = T.axis.spatial(12, i2 * 4 + ax1)
ax3_2 = T.axis.spatial(16, ax2)
with T.init():
B[ax0_2, ax1_2, ax2_2, ax3_2] = T.int8(-128)
B[ax0_2, ax1_2, ax2_2, ax3_2] = B[ax0_2, ax1_2, ax2_2, ax3_2] + A[
ax0_2, ax1_2, ax2_2, ax3_2
] * T.int8(2)
for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 4, 4, 16, 3, 3):
with T.block("C"):
ax0_3 = T.axis.spatial(1, i0 + ax0)
ax1_3 = T.axis.spatial(12, i1 * 4 + ax1)
ax2_3 = T.axis.spatial(12, i2 * 4 + ax2)
ax3_3 = T.axis.spatial(16, i3 * 16 + ax3)
rv0, rv1 = T.axis.remap("RR", [ax4, ax5])
with T.init():
C[ax0_3, ax1_3, ax2_3, ax3_3] = T.int8(-128)
C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max(
C[ax0_3, ax1_3, ax2_3, ax3_3], B[ax0_3, ax1_3 + rv0, ax2_3 + rv1, ax3_3]
)
sch = tir.Schedule(func_multi_writers, debug_mask="all")
with pytest.raises(tvm.tir.ScheduleError):
sch.rolling_buffer(sch.get_block("B_writer_0"), 0)
def test_fail_rolling_buffer_not_match():
@T.prim_func
def func_non_overlap(
A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 12, 12, 16), "int8"]
):
B = T.alloc_buffer([1, 12, 12, 16], dtype="int8")
for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 3, 3, 1):
for ax0, ax1, ax2 in T.grid(4, 4, 16):
with T.block("B"):
ax0_1 = T.axis.spatial(1, 0)
ax1_1 = T.axis.spatial(12, i1_0 * 4 + ax0)
ax2_1 = T.axis.spatial(12, i2_0 * 4 + ax1)
ax3 = T.axis.spatial(16, ax2)
T.reads(A[ax0_1, ax1_1, ax2_1, ax3])
T.writes(B[ax0_1, ax1_1, ax2_1, ax3])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3] = T.int8(-128)
B[ax0_1, ax1_1, ax2_1, ax3] = A[ax0_1, ax1_1, ax2_1, ax3]
for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 1, 1):
with T.block("C"):
ax0 = T.axis.spatial(1, i0_0 + i0_1)
ax1 = T.axis.spatial(12, i1_0 * 4 + i1_1)
ax2 = T.axis.spatial(12, i2_0 * 4 + i2_1)
ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
rv0, rv1 = T.axis.remap("RR", [i4, i5])
T.reads(B[ax0, ax1 + rv0, ax2 + rv1, ax3])
T.writes(C[ax0, ax1, ax2, ax3])
with T.init():
C[ax0, ax1, ax2, ax3] = T.int8(-128)
C[ax0, ax1, ax2, ax3] = T.max(
C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3]
)
sch = tir.Schedule(func_non_overlap, debug_mask="all")
with pytest.raises(tvm.tir.ScheduleError):
sch.rolling_buffer(sch.get_block("B"), 0)
def test_fail_rolling_buffer_injection_invalid():
sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    # Block B has not been computed at block C's loops, so rolling_buffer injection is invalid.
_, _ = _tile_nd(sch, [1, 4, 8, 16], "C")
_, _ = _tile_nd(sch, [1, 4, 8, 16], "B")
with pytest.raises(tvm.tir.ScheduleError):
sch.rolling_buffer(sch.get_block("B"), 0)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_sampling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import sys
import numpy
import pytest
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 257, 1470))
B = T.match_buffer(b, (128, 257, 1470))
for i, j, k in T.grid(128, 257, 1470):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def tiled_conv2d_with_padding(
inputs: T.Buffer[(1, 224, 224, 3), "float32"],
weight: T.Buffer[(7, 7, 3, 64), "float32"],
conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227,
inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
T.float32(0),
dtype="float32",
)
for (
i0_0,
i1_0,
i2_0,
i3_0,
i0_1_1,
i1_1_1,
i2_1_1,
i3_1_1,
i4_0,
i5_0,
i6_0,
i0_2,
i1_2,
i2_2,
i3_2,
i4_1,
i5_1,
i6_1,
i0_3,
i1_3,
i2_3,
i3_3,
) in T.grid(1, 1, 4, 1, 1, 2, 4, 1, 7, 7, 1, 1, 1, 1, 1, 1, 1, 3, 1, 56, 7, 64):
with T.block("conv2d_nhwc"):
n = T.axis.spatial(1, 0)
h = T.axis.spatial(112, i1_1_1 * 56 + i1_3)
w = T.axis.spatial(112, i2_0 * 28 + i2_1_1 * 7 + i2_3)
co, rh, rw, rc = T.axis.remap("SRRR", [i3_3, i4_0, i5_0, i6_1])
T.reads(
conv2d_nhwc[n, h, w, co],
PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
weight[rh, rw, rc, co],
)
T.writes(conv2d_nhwc[n, h, w, co])
with T.init():
conv2d_nhwc[n, h, w, co] = T.float32(0)
conv2d_nhwc[n, h, w, co] = (
conv2d_nhwc[n, h, w, co]
+ PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
)
# pylint: enable=no-member,invalid-name,unused-variable
def test_sample_categorical():
"""Test sample categorical sampling function"""
n = 1000
sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
counter = defaultdict(int)
candidates = [5, 2, 7, 1]
probs = [0.15, 0.55, 0.05, 0.25]
for _ in range(n):
v = sch.get(sch.sample_categorical(candidates, probs))
counter[v] += 1
for i, prob in enumerate(probs):
assert (prob - 0.07) * n <= counter[candidates[i]] <= (prob + 0.07) * n
verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_categorical_copy():
"""Check the random variable sampling results after schedule copy"""
n = 100
sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
candidates = [1, 2, 3, 4]
probs = [0.1, 0.2, 0.3, 0.4]
rv_decisions = []
for _ in range(n):
rv = sch.sample_categorical(candidates, probs) # pylint: disable=invalid-name
rv_decisions.append((rv, sch.get(rv)))
sch_copy = sch.copy()
for rv, decision in rv_decisions: # pylint: disable=invalid-name
decision_copy = sch_copy.get(rv)
assert int(decision) == int(decision_copy)
def test_sample_categorical_serialize():
"""Check the random variable sampling results after schedule serialization"""
n = 100
sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
candidates = [5, 6, 7, 8]
probs = [0.23, 0.19, 0.37, 0.21]
decisions = []
for _ in range(n):
rv = sch.get(sch.sample_categorical(candidates, probs)) # pylint: disable=invalid-name
decisions.append(rv)
new_sch = verify_trace_roundtrip(sch, mod=elementwise)
for i, new_inst in enumerate(new_sch.trace.insts):
assert decisions[i] == candidates[new_sch.trace.decisions[new_inst].value]
def test_sample_perfect_tile_power_of_two():
sch = tir.Schedule(elementwise, debug_mask="all")
i, _, _ = sch.get_loops(sch.get_block("B"))
factors = sch.sample_perfect_tile(i, n=4)
factors = [sch.get(i) for i in factors]
prod = factors[0] * factors[1] * factors[2] * factors[3]
assert prod == 128
verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_prime():
sch = tir.Schedule(elementwise, debug_mask="all")
_, i, _ = sch.get_loops(sch.get_block("B"))
factors = sch.sample_perfect_tile(i, n=4)
factors = [sch.get(i) for i in factors]
prod = factors[0] * factors[1] * factors[2] * factors[3]
assert prod == 257
verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_composite():
sch = tir.Schedule(elementwise, debug_mask="all")
_, _, i = sch.get_loops(sch.get_block("B"))
factors = sch.sample_perfect_tile(i, n=4)
factors = [sch.get(i) for i in factors]
prod = factors[0] * factors[1] * factors[2] * factors[3]
assert prod == 1470
verify_trace_roundtrip(sch, mod=elementwise)
use_sugared_block = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_sample_compute_location(use_sugared_block):
n = 100
sch = tir.Schedule(tiled_conv2d_with_padding, seed=42, debug_mask="all")
if use_sugared_block:
pad_input = "PadInput"
else:
pad_input = sch.get_block("PadInput")
decision_dict = dict()
for _ in range(n):
_ = sch.sample_compute_location(pad_input) # pylint: disable=invalid-name
decision = sch.trace.decisions[sch.trace.insts[-1]]
        decision_dict[decision] = decision_dict.get(decision, 0) + 1
n_candidates = 8
expected_rate = 1.0 / n_candidates
for _, cnt in decision_dict.items():
numpy.testing.assert_allclose(expected_rate, cnt / n, atol=0.04)
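def _example_sample_compute_location_use():
    # Illustrative sketch, not an upstream test (the `_example_*` name is ours;
    # the leading underscore keeps pytest from collecting it). The sampled
    # location is a LoopRV (or a root/inline sentinel) that can be fed straight
    # into compute_at, mirroring how meta schedule typically consumes this
    # primitive.
    sch = tir.Schedule(tiled_conv2d_with_padding, seed=0, debug_mask="all")
    pad_input = sch.get_block("PadInput")
    loc = sch.sample_compute_location(pad_input)
    sch.compute_at(pad_input, loc, preserve_unit_loops=True)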
def test_sample_perfect_tile_after_copy():
sch = tir.Schedule(elementwise, debug_mask="all")
sch_copy = sch.copy()
_, _, i = sch.get_loops(sch.get_block("B"))
sch.sample_perfect_tile(i, n=4)
_, _, i = sch_copy.get_loops(sch_copy.get_block("B"))
# Hangs if ForkSeed is not invoked when copying a schedule
sch_copy.sample_perfect_tile(i, n=4)
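def _example_sample_perfect_tile_invariant():
    # Illustrative sketch, not an upstream test (the `_example_*` name is ours;
    # the leading underscore keeps pytest from collecting it). Whichever
    # decision is drawn, sample_perfect_tile returns factors whose product
    # equals the loop extent -- the invariant the three perfect-tile tests
    # above check for extents 128, 257, and 1470.
    sch = tir.Schedule(elementwise, debug_mask="all")
    i, _, _ = sch.get_loops(sch.get_block("B"))
    factors = [sch.get(f) for f in sch.sample_perfect_tile(i, n=2)]
    prod = factors[0] * factors[1]
    assert prod == 128  # extent of the first loop in `elementwise`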
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_set_axis_separator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.tir import IndexMap
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_set_axis_separator_input_buffer(A: T.Buffer(shape=(128, 128), dtype="float32", axis_separators=(1,)), C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
B_subregion0[()] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1])
B_subregion0[()] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1])
C[vi, vj] = B_subregion1[()] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
argument_style = tvm.testing.parameter(
    'set_axis_separators',
    'transform_layout_named',
    'transform_layout_buffer_object',
)
def test_set_axis_separator(argument_style):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("write", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='B', index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        B = s.get(s.get_block('B')).writes[0].buffer
        s.transform_layout(block='B', buffer=B, index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
tvm.ir.assert_structural_equal(element_wise_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_axis_separator_fail_on_index_out_of_bound():
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    with pytest.raises(AssertionError):
        s.set_axis_separator(s.get_block("B"), ("write", 1), [1])
    with pytest.raises(AssertionError):
        s.set_axis_separator(s.get_block("B"), ("read", -1), [1])
def test_set_axis_separator_input_buffer(argument_style):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("read", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='A', index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        A = s.get(s.get_block('B')).reads[0].buffer
        s.transform_layout(block='B', buffer=A, index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
tvm.ir.assert_structural_equal(element_wise_set_axis_separator_input_buffer, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_axis_separator_subregion(argument_style):
func = element_wise_subregion_match
s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("write", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='B', index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        B = s.get(s.get_block('B')).writes[0].buffer
        s.transform_layout(block='B', buffer=B, index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
tvm.ir.assert_structural_equal(element_wise_subregion_match_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
class TestIndexedLookup(tvm.testing.CompareBeforeAfter):
def transform(self):
def func(mod):
sch = tir.Schedule(mod)
sch.set_axis_separator('block', 'B', [1])
return sch.mod
return func
@T.prim_func
def before():
A = T.alloc_buffer([4,4], dtype="int32")
B = T.alloc_buffer([1,1], dtype="int32")
for j in T.serial(4):
with T.block('block'):
A[B[0,0],j] = 0
@T.prim_func
def expected():
A = T.alloc_buffer([4,4], dtype="int32")
B = T.alloc_buffer([1,1], dtype="int32", axis_separators=[1])
for j in T.serial(4):
with T.block('block'):
A[B[0,0],j] = 0
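def _example_axis_separator_inspection():
    # Illustrative sketch, not an upstream test (the `_example_*` name is ours;
    # the leading underscore keeps pytest from collecting it). After
    # set_axis_separator, the rewritten buffer carries the separator
    # annotation, which later lowering uses to keep the axes on either side of
    # the separator as distinct physical dimensions instead of flattening them
    # into one linear index.
    s = tir.Schedule(element_wise, debug_mask="all")
    s.set_axis_separator(s.get_block("B"), ("write", 0), [1])
    buf = s.get(s.get_block("B")).writes[0].buffer
    assert [int(x) for x in buf.axis_separators] == [1]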
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_set_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_scope(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_shared[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
B_subregion0[()] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_scope(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
B_subregion0_shared[()] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
C[vi, vj] = B_subregion1_shared[()] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_set_scope(use_block_name):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
s.set_scope('B' if use_block_name else s.get_block("B"), 0, "shared")
tvm.ir.assert_structural_equal(element_wise_set_scope, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_scope_fail_on_output_buffer(use_block_name):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
with pytest.raises(tvm.tir.ScheduleError):
s.set_scope('C' if use_block_name else s.get_block("C"), 0, "shared")
def test_set_scope_fail_on_index_out_of_bound():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
with pytest.raises(tvm.tir.ScheduleError):
s.set_scope(s.get_block("B"), 1, "shared")
with pytest.raises(tvm.tir.ScheduleError):
s.set_scope(s.get_block("B"), -1, "shared")
def test_set_scope_fail_on_invalid_scope():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
with pytest.raises(tvm.tir.ScheduleError):
s.set_scope(s.get_block("B"), 0, "test_scope")
def test_set_scope_subregion():
func = element_wise_subregion_match
s = tir.Schedule(func, debug_mask='all')
s.set_scope(s.get_block("B"), 0, "shared")
tvm.ir.assert_structural_equal(element_wise_subregion_match_set_scope, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
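def _example_set_scope_inspection():
    # Illustrative sketch, not an upstream test (the `_example_*` name is ours;
    # the leading underscore keeps pytest from collecting it). set_scope
    # rewrites the storage-scope string of the selected write buffer; the tests
    # above use "shared", and "local" is another commonly accepted scope
    # (availability of a scope ultimately depends on the target).
    s = tir.Schedule(element_wise, debug_mask="all")
    s.set_scope(s.get_block("B"), 0, "local")
    assert s.get(s.get_block("B")).writes[0].buffer.scope() == "local"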
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_split_fuse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.expr import IntImm
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_dependent_loops(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i in T.serial(0, 128):
for j, k in T.grid(i, 128):
with T.block("B"):
vi = T.axis.S(128, i)
vj = T.axis.S(i, j)
vk = T.axis.S(128, k)
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic(a: T.handle, b: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (128, 128, n))
B = T.match_buffer(b, (128, 128, n))
for i, j, k in T.grid(128, 128, n):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic_fused(a: T.handle, b: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (128, 128, n))
B = T.match_buffer(b, (128, 128, n))
for i_j_k_fused in T.serial(0, (n * 16384)):
with T.block("B"):
vi = T.axis.S(128, T.floordiv(i_j_k_fused, n * 128))
vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, n * 128), n))
vk = T.axis.S(n, T.floormod(i_j_k_fused, n))
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic_split(a: T.handle, b: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (128, 128, n))
B = T.match_buffer(b, (128, 128, n))
for i, j, k0, k1 in T.grid(128, 128, 10, T.floordiv((n + 9), 10)):
with T.block("B"):
T.where((((k0 * T.floordiv((n + 9), 10)) + k1) < n))
vi, vj = T.axis.remap("SS", [i, j])
vk = T.axis.S(n, k0 * T.floordiv(n + 9, 10) + k1)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_seq(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
C = T.alloc_buffer((128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.serial(0, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = A[vi, vj, vk] * 2.0
for k in T.serial(0, 128):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = C[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_anno(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.serial(0, 128, annotations={"useless_annotation": True}):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_thread_binding(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_starting_point(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.serial(10, 128):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j, k in T.grid(128, 128, 128):
with T.block("opaque"):
T.reads([A[i, j, k]])
T.writes([B[i, j, k]])
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_fused(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for fused in T.serial(0, 2097152):
with T.block("B"):
vi = T.axis.S(128, T.floordiv(fused, 16384))
vj = T.axis.S(128, T.floordiv(T.floormod(fused, 16384), 128))
vk = T.axis.S(128, T.floormod(fused, 128))
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_split_case0(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128])
B = T.match_buffer(b, [128, 128, 128])
for i1, i2, i3, j1, j2, k1, k2 in T.grid(2, 1, 64, 4, 32, 16, 8):
with T.block("B"):
vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3)
vj = T.axis.S(128, j1 * 32 + j2)
vk = T.axis.S(128, k1 * 8 + k2)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_split_case1(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128])
B = T.match_buffer(b, [128, 128, 128])
for i1, i2, i3, j1, j2, j3, k1, k2, k3 in T.grid(2, 1, 64, 2, 1, 64, 2, 1, 64):
with T.block("B"):
vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3)
vj = T.axis.S(128, j1 * 64 + j2 * 64 + j3)
vk = T.axis.S(128, k1 * 64 + k2 * 64 + k3)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_split_with_predicate(a: T.handle, b: T.handle) -> None:
B = T.match_buffer(b, [128, 128, 128])
A = T.match_buffer(a, [128, 128, 128])
for i0, i1, i2, j0, j1, k0, k1 in T.grid(1000, 2, 3, 1, 129, 3, 43):
with T.block("B"):
vi = T.axis.S(128, i0 * 6 + i1 * 3 + i2)
vj = T.axis.S(128, j0 * 129 + j1)
vk = T.axis.S(128, k0 * 43 + k1)
T.where((i0 * 2 + i1) * 3 + i2 < 128 and j0 * 129 + j1 < 128 and k0 * 43 + k1 < 128)
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_fuse_with_opaque_block(a: T.handle, b: T.handle) -> None:
B = T.match_buffer(b, [128, 128, 128])
A = T.match_buffer(a, [128, 128, 128])
for i_j_k_fused in T.serial(0, 2097152):
with T.block("opaque"):
T.reads(
[
A[
T.floordiv(i_j_k_fused, 16384),
T.floordiv(T.floormod(i_j_k_fused, 16384), 128),
T.floormod(i_j_k_fused, 128),
]
]
)
T.writes(
[
B[
T.floordiv(i_j_k_fused, 16384),
T.floordiv(T.floormod(i_j_k_fused, 16384), 128),
T.floormod(i_j_k_fused, 128),
]
]
)
with T.block("B"):
vi = T.axis.S(128, T.floordiv(i_j_k_fused, 16384))
vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, 16384), 128))
vk = T.axis.S(128, T.floormod(i_j_k_fused, 128))
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_split_with_opaque_block(a: T.handle, b: T.handle) -> None:
B = T.match_buffer(b, [128, 128, 128])
A = T.match_buffer(a, [128, 128, 128])
for i0, i1, j, k in T.grid(8, 16, 128, 128):
with T.block("opaque"):
T.reads([A[i0 * 16 + i1, j, k]])
T.writes([B[i0 * 16 + i1, j, k]])
with T.block("B"):
vi = T.axis.S(128, i0 * 16 + i1)
vj, vk = T.axis.remap("SS", [j, k])
T.reads([A[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16], "float32")
B = T.match_buffer(b, [16, 16], "float32")
for i, j in T.grid(16, 16):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([A[0:16, 0:16]])
A[vi, vj] = 1
for i, j in T.grid(16, 16):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([])
T.writes([B[0:16, 0:16]])
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
@T.prim_func
def opaque_access_fused(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16])
B = T.match_buffer(b, [16, 16])
for i_j_fused in T.serial(0, 256):
with T.block("A"):
vi = T.axis.S(16, T.floordiv(i_j_fused, 16))
vj = T.axis.S(16, T.floormod(i_j_fused, 16))
T.reads([])
T.writes([A[0:16, 0:16]])
A[vi, vj] = 1
for i_j_fused in T.serial(0, 256):
with T.block("B"):
vi = T.axis.S(16, T.floordiv(i_j_fused, 16))
vj = T.axis.S(16, T.floormod(i_j_fused, 16))
T.reads([])
T.writes([B[0:16, 0:16]])
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle"))
@T.prim_func
def opaque_access_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16))
B = T.match_buffer(b, (16, 16))
for i, j0, j1 in T.grid(16, 4, 4):
with T.block("A"):
vi = T.axis.S(16, i)
vj = T.axis.S(16, j0 * 4 + j1)
T.reads([])
T.writes([A[0:16, 0:16]])
A[vi, vj] = 1
for i, j0, j1 in T.grid(16, 4, 4):
with T.block("B"):
vi = T.axis.S(16, i)
vj = T.axis.S(16, j0 * 4 + j1)
T.reads([])
T.writes([B[0:16, 0:16]])
T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle"))
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (127, 128))
B = T.match_buffer(b, (127, 128))
for i in T.serial(0, 4):
for j, k in T.grid(T.min(31, 126 - i * 32) + 1, 128):
with T.block("B"):
vi = T.axis.S(127, i * 32 + j)
vj = T.axis.S(128, k)
B[vi, vj] = A[vi, vj]
@T.prim_func
def elementwise_not_affine_fused(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [127, 128])
B = T.match_buffer(b, [127, 128])
for i in T.grid(4):
for j_k_fused in T.serial(0, T.min(31, 126 - i * 32) * 128 + 128):
with T.block("B"):
vi = T.axis.S(
127,
i * 32 + T.floordiv(j_k_fused, 128),
)
vj = T.axis.S(128, T.floormod(j_k_fused, 128))
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = A[vi, vj]
# pylint: enable=no-member,invalid-name,unused-variable
def test_fuse():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
sch.fuse(i, j, k)
tvm.ir.assert_structural_equal(elementwise_fused, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_split():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
sch.split(i, factors=[2, 1, 64])
sch.split(j, factors=[4, 32])
sch.split(k, factors=[16, 8])
tvm.ir.assert_structural_equal(elementwise_split_case0, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_split_with_inferred_factor():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
sch.split(i, factors=[None, 1, 64])
sch.split(j, factors=[2, None, 64])
sch.split(k, factors=[2, 1, None])
tvm.ir.assert_structural_equal(elementwise_split_case1, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_split_with_predicate():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
sch.split(i, factors=[1000, 2, 3])
sch.split(j, factors=[None, 129])
sch.split(k, factors=[3, None])
tvm.ir.assert_structural_equal(elementwise_split_with_predicate, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_fuse_fail_not_only_child():
sch = tir.Schedule(elementwise_with_seq, debug_mask="all")
block_b = sch.get_block("B")
_, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.fuse(j, k)
def test_fuse_split_fail_with_annotation():
sch = tir.Schedule(elementwise_with_anno, debug_mask="all")
block_b = sch.get_block("B")
_, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.fuse(j, k)
with pytest.raises(tvm.tir.ScheduleError):
sch.split(k, factors=[None, 10])
def test_fuse_split_fail_not_start_with_zero():
    sch = tir.Schedule(elementwise_with_starting_point, debug_mask="all")
block_b = sch.get_block("B")
_, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.fuse(j, k)
with pytest.raises(tvm.tir.ScheduleError):
sch.split(k, factors=[None, 10])
def test_fuse_with_opaque_block():
sch = tir.Schedule(elementwise_with_opaque_block, debug_mask="all")
block_opaque = sch.get_block("opaque")
i, j, k = sch.get_loops(block_opaque)
sch.fuse(i, j, k)
tvm.ir.assert_structural_equal(elementwise_fuse_with_opaque_block, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_with_opaque_block)
def test_fuse_with_opaque_access():
sch = tir.Schedule(opaque_access, debug_mask="all")
block_a = sch.get_block("A")
i, j = sch.get_loops(block_a)
sch.fuse(i, j)
block_b = sch.get_block("B")
i, j = sch.get_loops(block_b)
sch.fuse(i, j)
tvm.ir.assert_structural_equal(opaque_access_fused, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_split_with_opaque_block():
sch = tir.Schedule(elementwise_with_opaque_block, debug_mask="all")
block_opaque = sch.get_block("opaque")
i, _, _ = sch.get_loops(block_opaque)
sch.split(i, factors=[None, 16])
tvm.ir.assert_structural_equal(elementwise_split_with_opaque_block, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_with_opaque_block)
def test_split_with_opaque_access():
sch = tir.Schedule(opaque_access, debug_mask="all")
block_a = sch.get_block("A")
_, j = sch.get_loops(block_a)
sch.split(j, factors=[None, 4])
block_b = sch.get_block("B")
_, j = sch.get_loops(block_b)
sch.split(j, factors=[None, 4])
tvm.ir.assert_structural_equal(opaque_access_split, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_split_with_non_positive_factors():
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.split(i, factors=[-2, -64])
with pytest.raises(tvm.tir.ScheduleError):
sch.split(j, factors=[0, None])
with pytest.raises(tvm.tir.ScheduleError):
sch.split(k, factors=[None, -16])
def test_fuse_split_fail_with_thread_binding():
sch = tir.Schedule(elementwise_with_thread_binding, debug_mask="all")
block_b = sch.get_block("B")
_, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.fuse(j, k)
with pytest.raises(tvm.tir.ScheduleError):
sch.split(k, factors=[None, 10])
def test_fuse_symbolic():
sch = tir.Schedule(elementwise_symbolic, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
sch.fuse(i, j, k)
tvm.ir.assert_structural_equal(elementwise_symbolic_fused, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_symbolic)
def test_split_symbolic():
sch = tir.Schedule(elementwise_symbolic, debug_mask="all")
block_b = sch.get_block("B")
_, _, k = sch.get_loops(block_b)
sch.split(k, factors=[10, None])
tvm.ir.assert_structural_equal(elementwise_symbolic_split, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_symbolic)
def test_fuse_fail_with_dependent_loops():
sch = tir.Schedule(elementwise_dependent_loops, debug_mask="all")
block_b = sch.get_block("B")
i, j, _ = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError):
sch.fuse(i, j)
def test_fuse_not_affine():
sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
block_b = sch.get_block("B")
_, j, k = sch.get_loops(block_b)
sch.fuse(j, k)
tvm.ir.assert_structural_equal(elementwise_not_affine_fused, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_not_affine)
def test_add_unit_loop_above_block():
@T.prim_func
def zero_dim(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
@T.prim_func
def zero_dim_added(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
for u in range(1):
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
sch = tir.Schedule(zero_dim, debug_mask="all")
block = sch.get_block("C")
sch.add_unit_loop(block)
tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"])
def test_add_unit_loop_above_loop():
@T.prim_func
def zero_dim(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
for u in range(1):
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
@T.prim_func
def zero_dim_added(
A: T.Buffer[(), "int32"],
B: T.Buffer[(), "int32"],
C: T.Buffer[(), "int32"],
) -> None:
for u1, u2 in T.grid(1, 1):
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
sch = tir.Schedule(zero_dim, debug_mask="all")
block = sch.get_block("C")
(loop,) = sch.get_loops(block)
sch.add_unit_loop(loop)
tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"])
@pytest.mark.skip("Pending fix in affine analysis")
def test_fuse_int64():
def _create_prim_func():
n = te.const(16, "int32")
m = te.const(32, "int64")
A = te.placeholder((n, m), name="A", dtype="int32")
B = te.compute((n, m), lambda i, j: A[i, j] + 1, name="B")
return te.create_prim_func([A, B])
mod = _create_prim_func()
sch = tir.Schedule(mod, debug_mask="all")
i, j = sch.get_loops(sch.get_block("B"))
sch.fuse(i, j)
verify_trace_roundtrip(sch=sch, mod=mod)
def test_split_int64_extent_with_mixed_factors():
def _create_prim_func():
m = te.const(384, "int64")
A = te.placeholder((m,), name="A", dtype="float32")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
return te.create_prim_func([A, B])
mod = _create_prim_func()
sch = tir.Schedule(mod, debug_mask="all")
(i,) = sch.get_loops(sch.get_block("B"))
sch.split(
i,
factors=[
te.const(1, "int64"),
te.const(512, "int32"),
],
)
def test_split_int64_extent_with_int32_factors():
def _create_prim_func():
m = te.const(12, "int64")
A = te.placeholder((m,), name="A", dtype="float32")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
return te.create_prim_func([A, B])
mod = _create_prim_func()
sch = tir.Schedule(mod, debug_mask="all")
(i,) = sch.get_loops(sch.get_block("B"))
sch.split(
i,
factors=[
te.const(1, "int32"),
te.const(1, "int32"),
te.const(3, "int32"),
te.const(1, "int32"),
te.const(4, "int32"),
],
)
def test_split_int64_factors():
sch = tir.Schedule(elementwise_symbolic, debug_mask="all")
block_b = sch.get_block("B")
_, _, k = sch.get_loops(block_b)
sch.split(k, factors=[IntImm(dtype="int64", value=10), None])
tvm.ir.assert_structural_equal(elementwise_symbolic_split, sch.mod["main"])
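def _example_split_inferred_factor():
    # Illustrative sketch, not an upstream test (the `_example_*` name is ours;
    # the leading underscore keeps pytest from collecting it). Passing None for
    # exactly one factor asks split to infer it from the loop extent -- the
    # behaviour test_split_with_inferred_factor relies on. Here a loop of
    # extent 128 split as [None, 16] yields an outer extent of 8.
    sch = tir.Schedule(elementwise, debug_mask="all")
    i, _, _ = sch.get_loops(sch.get_block("B"))
    outer, inner = sch.split(i, factors=[None, 16])
    assert int(sch.get(outer).extent) == 8
    assert int(sch.get(inner).extent) == 16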
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import gc
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
for i in range(128):
with T.block("B"):
vi = T.axis.S(128, i)
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
B[vi, 0] = A[vi, 0]
if A[vi, 0] == 0.0:
with T.block("C"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("D"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 3.0
else:
with T.block("E"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("F"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
def replace_ir_builder(deep_copy=False, realize=False):
new_func = tvm.script.from_source(elementwise.script())
s = tir.ScheduleState(new_func, debug_mask="all")
target = tvm.tir.Block(
iter_vars=[],
reads=[],
writes=[],
name_hint="target",
body=s.mod["main"].body.block.body[1],
init=None,
alloc_buffers=None,
match_buffers=None,
annotations=None,
)
if realize:
target = tvm.tir.BlockRealize(
iter_values=[],
predicate=True,
block=target,
)
if deep_copy:
target.__setstate__(target.__getstate__())
gc.collect()
return s, target
def replace_ir_builder_module(deep_copy=False, realize=False):
new_func = tvm.script.from_source(elementwise.script())
other_func = tvm.script.from_source(elementwise.script())
mod = IRModule(functions={"main": new_func, "other": other_func})
s = tir.ScheduleState(mod, debug_mask="all")
target = tvm.tir.Block(
iter_vars=[],
reads=[],
writes=[],
name_hint="target",
body=s.mod["main"].body.block.body[1],
init=None,
alloc_buffers=None,
match_buffers=None,
annotations=None,
)
if realize:
target = tvm.tir.BlockRealize(
iter_values=[],
predicate=True,
block=target,
)
if deep_copy:
target.__setstate__(target.__getstate__())
gc.collect()
return s, target
def replace_ir_builder_with_opaque():
func = tvm.script.from_source(block_in_opaque_block.script())
s = tir.ScheduleState(func, debug_mask="all")
gc.collect()
return s
def test_replace_direct_write0():
s, target = replace_ir_builder(realize=True)
old_hash = s.mod["main"].__hash__()
sref = s.get_sref(s.mod["main"].body.block.body[1])
s.replace(sref, target)
# There is no other reference so the AST node can be written directly
assert old_hash == s.mod["main"].__hash__()
# Check the replaced part is equal to the target
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[1], target)
    # The target reuses the stmt of the sref, so the sref won't be None
assert sref.stmt is not None
def test_replace_direct_write1():
s, target = replace_ir_builder(realize=True)
old_hash = s.mod["main"].body.block.body.__hash__()
hold_ref = s.mod["main"].body.block.body[1]
sref = s.get_sref(s.mod["main"].body.block.body[1])
s.replace(sref, target)
# There is no other reference so the AST node can be written directly
assert old_hash == s.mod["main"].body.block.body.__hash__()
assert not tvm.ir.structural_equal(hold_ref.body, target)
# Check the replaced part is equal to the target
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[1], target)
    # The target reuses `sref.stmt`, so the sref won't be None
assert sref.stmt is not None
def test_replace_copy():
s, target = replace_ir_builder(deep_copy=True, realize=True)
old_hash = s.mod["main"].__hash__()
# We hold another reference of func
old_func = s.mod["main"]
sref = s.get_sref(s.mod["main"].body.block.body[0])
s.replace(sref, target)
    # We need to copy the whole func to keep `old_func` unchanged
assert old_hash != s.mod["main"].__hash__()
assert not tvm.ir.structural_equal(old_func.body, s.mod["main"].body)
assert old_hash == old_func.__hash__()
# Check the replaced part is equal to the target
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0], target)
# The replaced AST node will be deleted, so the ref will be None
assert sref.stmt is None
def test_replace_partial_copy0():
s, target = replace_ir_builder(deep_copy=True, realize=True)
func_old_hash = s.mod["main"].__hash__()
hold_ref = s.mod["main"].body.block.body[0]
ref_old_hash = hold_ref.__hash__()
sref = s.get_sref(s.mod["main"].body.block.body[0].body)
other_part_hash = s.mod["main"].body.block.body[1].__hash__()
s.replace(sref, target)
    # The stmt is held by `hold_ref`, so it will be copied under copy-on-write
    # because its ref count is not unique
assert ref_old_hash != s.mod["main"].body.block.body[0].__hash__()
assert not tvm.ir.structural_equal(hold_ref.body, target)
# The function and the other part stmt can be directly written
assert func_old_hash == s.mod["main"].__hash__()
assert other_part_hash == s.mod["main"].body.block.body[1].__hash__()
# Check the replaced part is equal to the target
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0].body, target)
# The replaced AST node will be deleted, so the ref will be None
assert sref.stmt is None
def test_replace_partial_copy1():
s, target = replace_ir_builder(deep_copy=True)
func_old_hash = s.mod["main"].__hash__()
hold_ref = s.mod["main"].body.block.body[0].body
stmt_old_hash = s.mod["main"].body.block.body[0].__hash__()
sref = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
other_part_hash = s.mod["main"].body.block.body[1].__hash__()
s.replace(sref, target)
    # The parent stmt is written in place since it holds the only reference, so its hash is unchanged
assert stmt_old_hash == s.mod["main"].body.block.body[0].__hash__()
assert not tvm.ir.structural_equal(hold_ref.body, target)
# The function and the other part stmt can be directly written
assert func_old_hash == s.mod["main"].__hash__()
assert other_part_hash == s.mod["main"].body.block.body[1].__hash__()
# Check the replaced part is equal to the target
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0].body.body.block, target)
# The replaced AST node will be deleted, so `sref.stmt` will be None
assert sref.stmt is None
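# A minimal sketch of the copy-on-write rule exercised by the tests above
# (reusing the `replace_ir_builder` helper from earlier in this file):
#
#   s, target = replace_ir_builder(deep_copy=True, realize=True)
#   held = s.mod["main"].body.block.body[0]   # extra reference -> ref count not unique
#   sref = s.get_sref(s.mod["main"].body.block.body[0].body)
#   s.replace(sref, target)
#   # Nodes whose ref count is not unique (here `held`) get copied; ancestors
#   # with a unique ref count are written in place, so their hashes survive.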
def test_replace_root_write():
s, target = replace_ir_builder()
old_hash = s.mod["main"].__hash__()
sref = s.get_sref(s.mod["main"].body.block)
s.replace(sref, target)
# Check that no copy is made and the new body equals the target
assert old_hash == s.mod["main"].__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
def test_replace_root_copy0():
s, target = replace_ir_builder(deep_copy=True)
old_hash = s.mod["main"].__hash__()
func_ref = s.mod["main"]
sref = s.get_sref(s.mod["main"].body.block)
s.replace(sref, target)
# Check the new body equals the target
assert old_hash != s.mod["main"].__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
# Check the original func remains unchanged
assert old_hash == func_ref.__hash__()
assert not tvm.ir.structural_equal(func_ref.body, target)
def test_replace_root_copy1():
s, target = replace_ir_builder(deep_copy=True, realize=True)
old_hash = s.mod["main"].body.block.__hash__()
func_ref = s.mod["main"].body.block
sref = s.get_sref(s.mod["main"].body.block.body[0])
s.replace(sref, target)
# Check the new body equals the target
assert old_hash != s.mod["main"].body.block.__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0], target)
# Check the original func remains unchanged
assert old_hash == func_ref.__hash__()
assert not tvm.ir.structural_equal(func_ref.body, target)
def test_replace_root_copy2():
s, target = replace_ir_builder(deep_copy=True)
old_hash = s.mod.functions.__hash__()
func_ref = s.mod.functions
sref = s.get_sref(s.mod["main"].body.block)
s.replace(sref, target)
# Check the new body equals the target
assert old_hash != s.mod.functions.__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
# Check the original func remains unchanged
assert old_hash == func_ref.__hash__()
for _, v in func_ref.items():
assert not tvm.ir.structural_equal(v.body.block, target)
def test_replace_root_copy3():
s, target = replace_ir_builder(deep_copy=True)
old_hash = s.mod.__hash__()
func_ref = s.mod
sref = s.get_sref(s.mod["main"].body.block)
s.replace(sref, target)
# Check the new body equals the target
assert old_hash != s.mod.__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
# Check the original func remains unchanged
assert old_hash == func_ref.__hash__()
assert not tvm.ir.structural_equal(func_ref["main"].body.block, target)
def test_replace_block_remap():
func = elementwise
s = tir.ScheduleState(func, debug_mask="all")
# The target stmt
target = matmul.body.block.body.body.body[0].block
sref = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
s.replace(sref, target, {sref.stmt: target})
sref_new = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
# Check the original sref has been remapped
assert sref.__hash__() == sref_new.__hash__()
tvm.ir.assert_structural_equal(sref.stmt, target)
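# Passing the optional `{sref.stmt: target}` map above asks `replace` to re-map
# the existing sref onto the new block, so `sref` remains valid and now points
# at `target` instead of being invalidated.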
def test_replace_block_in_opaque_block():
s = replace_ir_builder_with_opaque()
root_hash = s.mod["main"].__hash__()
for_loop = s.mod["main"].body.block.body.body.block.body[1].then_case.block.body
sref = s.get_sref(for_loop)
new_for_loop = tir.For(
loop_var=for_loop.loop_var,
min_val=0,
extent=128,
kind=tir.ForKind.SERIAL,
body=tir.Evaluate(0),
thread_binding=None,
annotations=None,
)
s.replace(sref, new_for_loop)
assert root_hash == s.mod["main"].__hash__()
tvm.ir.assert_structural_equal(sref.stmt, new_for_loop)
def test_replace_ir_module():
s, target = replace_ir_builder_module(deep_copy=True)
old_hash = s.mod["main"].__hash__()
other_func_hash = s.mod["other"].__hash__()
func_ref = s.mod["main"]
sref = s.get_sref(s.mod["main"].body.block)
s.replace(sref, target)
# Check the new body equals the target
assert old_hash != s.mod["main"].__hash__()
tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
# Check the original func remains unchanged
assert old_hash == func_ref.__hash__()
assert not tvm.ir.structural_equal(func_ref.body, target)
assert other_func_hash == s.mod["other"].__hash__()
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_state_cached_flags.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.state import CachedFlags
from tvm.tir.stmt_functor import post_order_visit
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# fmt: off
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = 0.0
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
for i in range(128):
with T.block("B"):
vi = T.axis.S(128, i)
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
B[vi, 0] = A[vi, 0]
if A[vi, 0] == 0.0:
with T.block("C"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("D"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 3.0
else:
with T.block("E"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("F"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def write_after_read(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def loop_carried_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
C = T.match_buffer(c, (128,))
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = T.if_then_else(vi >= 1, B[vi - 1] + 1.0, 0.0, dtype="float32")
@T.prim_func
def concatenate_multi_producer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 64):
with T.block("A_0"):
vi = T.axis.S(64, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
@T.prim_func
def concatenate_multi_producer_uncovered(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 63):
with T.block("A_0"):
vi = T.axis.S(63, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
@T.prim_func
def lca_at_loop(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
C = T.match_buffer(c, (128,))
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = B[vi] + 1.0
@T.prim_func
def multi_producer_consumer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 64):
with T.block("A_0"):
vi = T.axis.S(64, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 64):
with T.block("B_0"):
vi = T.axis.S(64, i)
B[vi] = A[vi] + 2.0
for i in range(0, 64):
with T.block("B_1"):
vi = T.axis.S(64, i + 64)
B[vi] = A[vi] + 3.0
@T.prim_func
def elementwise_affine_producer(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j, k, l in T.grid(16, 2, 32, 16):
with T.block("B"):
vi = T.axis.S(128, i * 8 + j * 4 + k // 8)
vj = T.axis.S(128, k % 8 * 16 + l)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_subblock(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(32, 32):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
T.writes([B[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
for ii, jj in T.grid(4, 4):
with T.block("B_sub"):
vi_i, vj_i = T.axis.remap("SS", [ii, jj])
B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_subblock_uncovered(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(32, 32):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
T.writes([B[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
for ii, jj in T.grid(2, 2):
with T.block("B_sub"):
vi_i, vj_i = T.axis.remap("SS", [ii, jj])
B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def bound_to_thread(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 128], scope="shared")
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vj, vi] = B[vj, vi] + 1.0
@T.prim_func
def equal_ranked_threads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 128], scope="shared")
for i_o in T.thread_binding(0, 16, thread="threadIdx.x"):
for i_i in T.thread_binding(0, 8, thread="threadIdx.y"):
for j in T.serial(0, 128):
with T.block("B"):
vi = T.axis.S(128, i_o * 8 + i_i)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi = T.axis.S(128, i_o * 8 + i_i)
vj = T.axis.S(128, j)
C[vj, vi] = B[vj, vi] + 1.0
@T.prim_func
def warp_memory(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 4, 32], scope="warp")
for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0
@T.prim_func
def warp_memory_negative(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 4, 32], scope="warp")
for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
for i_o_prime in T.thread_binding(0, 4, thread="threadIdx.y"):
for j in T.serial(0, 128):
with T.block("C"):
_warp_id, warp_id, lane_id, vj = T.axis.remap(
"SSSS", [i_o, i_i, i_o_prime, j]
)
C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0
@T.prim_func
def non_perfect_tiling_cache(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh_0, ww_0 in T.grid(28, 28):
for ax0 in T.serial(0, 10):
for ax1 in T.serial(0, 10):
with T.block("cache"):
h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
T.where(
1 <= hh_0 * 8 + ax0
and hh_0 * 8 + ax0 < 225
and 1 <= ww_0 * 8 + ax1
and ww_0 * 8 + ax1 < 225
)
cache[h, w] = X[h, w]
for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(
Y[h, w],
T.if_then_else(
T.likely(1 <= h + kh, dtype="bool")
and T.likely(h + kh < 225, dtype="bool")
and T.likely(1 <= w + kw, dtype="bool")
and T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1],
0.0,
dtype="float32",
),
)
@T.prim_func
def uncovered_producer_region(A: T.Buffer[(128,), "float32"], B: T.Buffer[(128,), "float32"]):
for i in range(120):
with T.block("producer"):
vi = T.axis.S((0, 120), i)
A[vi] = 1.0
for i in range(120):
with T.block("consumer"):
vi = T.axis.S((8, 128), i + 8)
B[vi] = A[vi]
@T.prim_func
def matmul_relu_padding(A: T.Buffer[(127, 127), "float16"], B: T.Buffer[(127, 127), "float16"], compute: T.Buffer[(127, 127), "float32"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
C = T.alloc_buffer([127, 127], dtype="float32")
A_reindex = T.alloc_buffer([128, 128], dtype="float16")
B_reindex = T.alloc_buffer([128, 128], dtype="float16")
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
for ax0, ax1, ax2 in T.grid(128, 1, 128):
with T.block("A_reindex"):
v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
T.reads(A[v0, v2])
T.writes(A_reindex[v0, v2])
A_reindex[v0, v2] = T.if_then_else(v0 < 127 and v2 < 127, A[v0, v2], T.float16(0), dtype="float16")
for ax0, ax1, ax2 in T.grid(1, 128, 128):
with T.block("B_reindex"):
v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
T.reads(B[v2, v1])
T.writes(B_reindex[v2, v1])
B_reindex[v2, v1] = T.if_then_else(v2 < 127 and v1 < 127, B[v2, v1], T.float16(0), dtype="float16")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(1, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(16, thread="threadIdx.y"):
for ax2_0_0, ax2_0_1, ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(2, 2, 1, 2, 2, 1, 1):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused // 2 + ax0_0_3 + ax0_0_4)
v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_0_ax1_0_0_fused * 4 + ax0_0_2_ax1_0_2_fused % 2 * 2 + ax1_0_3)
v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax2_0_2)
T.reads(A_reindex[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0, ax1 in T.grid(16, 32):
with T.block("C_reindex_shared_wmma.accumulator"):
v0 = T.axis.spatial(128, ax0_0_2_ax1_0_2_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_2_ax1_0_2_fused % 2 * 32 + ax1)
T.reads(C_reindex_shared_wmma_accumulator[v0, v1])
T.writes(C_reindex_shared[v0, v1])
C_reindex_shared[v0, v1] = C_reindex_shared_wmma_accumulator[v0, v1]
for ax0, ax1 in T.grid(128, 64):
with T.block("C_reindex_shared"):
v0 = T.axis.spatial(128, ax0)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax1)
T.where(ax0 < 127 and ax0_0_0_ax1_0_0_fused * 64 + ax1 < 127)
T.reads(C_reindex_shared[v0, v1])
T.writes(C[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":3})
C[v0, v1] = C_reindex_shared[v0, v1]
for i0, i1 in T.grid(127, 127):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_1, i1_1])
T.writes(compute[i0_1, i1_1])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
@T.prim_func
def splitted_square_sum_with_predicate(
A: T.Buffer[(1, 7, 7, 512), "float32"], B: T.Buffer[(1, 1, 1, 512), "float32"]
) -> None:
for i0_i1_i2_i3_0_fused, ax0, ax1, ax2, ax3 in T.grid(2, 1, 1, 1, 256):
for ax4_ax5_fused_0, ax4_ax5_fused_1 in T.grid(1, 256):
with T.block("B"):
T.where(ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1 < 49)
ax0_1, ax1_1, ax2_1 = T.axis.remap("SSS", [ax0, ax1, ax2])
ax3_1 = T.axis.spatial(512, i0_i1_i2_i3_0_fused * 256 + ax3)
rv0 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) // 7)
rv1 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) % 7)
T.reads(A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
B[ax0_1, ax1_1, ax2_1, ax3_1] += A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# fmt: on
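# Roughly, the three cached flags asserted below mean:
#   - affine_binding: every block iter is bound to a quasi-affine expression of
#     the surrounding loop variables;
#   - region_cover: the regions the block reads are fully covered by what its
#     producers have written by the time the block runs;
#   - stage_pipeline: the enclosing scope has no write-after-read dependency
#     between its child blocks.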
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
result = None
def f_visit(node):
nonlocal result
if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
result = node
func = s.mod["main"]
post_order_visit(func.body, f_visit)
assert result is not None and isinstance(result, tvm.tir.Block)
return s.get_sref(result)
def test_elementwise():
s = tir.ScheduleState(elementwise, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_matmul():
s = tir.ScheduleState(matmul, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "init")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "update")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_block_in_opaque_block():
s = tir.ScheduleState(block_in_opaque_block, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "E")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "F")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_write_after_read():
s = tir.ScheduleState(write_after_read, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
# pylint: enable=protected-access
def test_loop_carried_dependency():
s = tir.ScheduleState(loop_carried_dependency, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
# pylint: enable=protected-access
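# In `loop_carried_dependency`, block "C" reads B[vi - 1], which is produced
# only in the *previous* iteration of the shared loop `i`, so at the
# producer/consumer LCA the read region is not yet covered: region_cover=False.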
def test_concatenate_multi_producer_covered(): # pylint: disable=invalid-name
s = tir.ScheduleState(concatenate_multi_producer, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_concatenate_multi_producer_uncovered(): # pylint: disable=invalid-name
s = tir.ScheduleState(concatenate_multi_producer_uncovered, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
# pylint: enable=protected-access
def test_lca_at_loop():
s = tir.ScheduleState(lca_at_loop, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_multi_producer_consumer():
s = tir.ScheduleState(multi_producer_consumer, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_elementwise_affine_producer():
s = tir.ScheduleState(elementwise_affine_producer, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_subblock():
s = tir.ScheduleState(elementwise_subblock, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_sub")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_subblock_uncovered():
s = tir.ScheduleState(elementwise_subblock_uncovered, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_sub")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_thread_binding():
s = tir.ScheduleState(bound_to_thread, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_equal_ranked_threads():
s = tir.ScheduleState(equal_ranked_threads, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_warp_memory():
s = tir.ScheduleState(warp_memory, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_warp_memory_negative():
s = tir.ScheduleState(warp_memory_negative, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_non_perfect_tiling_cache():
s = tir.ScheduleState(non_perfect_tiling_cache, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "cache")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "compute")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_uncovered_producer_region():
s = tir.ScheduleState(uncovered_producer_region, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "consumer")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
# pylint: enable=protected-access
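# The producer writes A[0:120] while the consumer reads A[8:128]; the tail
# A[120:128] is never produced, hence region_cover=False on the consumer.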
def test_matmul_relu_padding():
s = tir.ScheduleState(matmul_relu_padding, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "C_reindex_shared")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
def test_splitted_square_sum_with_predicate():
s = tir.ScheduleState(splitted_square_sum_with_predicate, debug_mask="all")
# pylint: disable=protected-access
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
# pylint: enable=protected-access
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_storage_align.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
# body
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
# body
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
T.block_attr({"buffer_dim_align":[[0, 0, 128, 127]]})
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
@T.prim_func
def element_wise_invalid_annotation(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
# body
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
T.block_attr({"buffer_dim_align": [0]})
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name
# fmt: on
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_storage_align(use_block_name):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = 'B' if use_block_name else s.get_block("B")
s.storage_align(B, 0, axis=0, factor=128, offset=127)
tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
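# The annotation packs the arguments as [buffer_index, axis, factor, offset]:
#   s.storage_align(B, 0, axis=0, factor=128, offset=127)
# turns into T.block_attr({"buffer_dim_align": [[0, 0, 128, 127]]}) in the
# expected `element_wise_storage_align` function above.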
def test_storage_align_update():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
s.storage_align(B, 0, axis=0, factor=128, offset=0)
s.storage_align(B, 0, axis=0, factor=128, offset=127)
tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_storage_align_invalid_factor1():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=0, factor=0, offset=127)
def test_storage_align_invalid_factor2():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=0, factor=-1, offset=127)
def test_storage_align_invalid_buffer():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
C = s.get_block("C")
with pytest.raises(tir.ScheduleError):
s.storage_align(C, 0, axis=0, factor=128, offset=127)
def test_storage_align_invalid_buffer_index():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 2, axis=0, factor=128, offset=127)
def test_storage_align_invalid_axis():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=2, factor=128, offset=127)
def test_storage_align_invalid_annotation():
func = element_wise_invalid_annotation
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=2, factor=128, offset=127)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
from tvm.tir.tensor_intrin.arm_cpu import (
DP4A_INTRIN,
ARM_DOT_4x4_i8_NEON_INTRIN,
ARM_DOT_4x4_i8_SDOT_INTRIN,
)
from tvm.tir.tensor_intrin.rocm import AMDGPU_SDOT4_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
@T.prim_func
def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
@T.prim_func
def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.tvm_mma_sync(
C.data,
C.elem_offset // 256,
A.data,
A.elem_offset // 256,
B.data,
B.elem_offset // 256,
C.data,
C.elem_offset // 256,
dtype="handle",
)
)
@T.prim_func
def dot_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,))
B = T.match_buffer(b, (4,))
C = T.match_buffer(c, ())
with T.block("root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
for i in range(0, 4):
with T.block("update"):
vi = T.axis.remap("R", [i])
C[()] = C[()] + A[vi] * B[vi]
@T.prim_func
def dot_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,), offset_factor=1)
B = T.match_buffer(b, (4,), offset_factor=1)
C = T.match_buffer(c, (), offset_factor=1)
with T.block("root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
T.evaluate(
T.call_extern(
"vec4add",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def outer_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
for i, j in T.grid(16, 16):
with T.block("update"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = C[vii, vjj] + A[vii, 0] * B[vjj, 0]
@T.prim_func
def outer_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.call_extern(
"outer_product",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def matmul(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
for i_outer, j_outer in T.grid(8, 8):
for i_inner_init, j_inner_init in T.grid(16, 16):
with T.block("init"):
vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
C[vi_init, vj_init] = T.float32(0)
for k_outer in T.grid(8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
]
)
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def batch_matmul(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k in T.grid(16, 128, 128, 128):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
@T.prim_func
def tensorized_batch_matmul_mma(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n in range(0, 16):
for i, j, k in T.grid(8, 8, 8):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
T.reads(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
)
T.writes(C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
(16, 16),
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def tensorized_batch_matmul_dot_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k_0 in T.grid(16, 128, 128, 32):
with T.block("blockized_update"):
vn, vi, vj, vko = T.axis.remap("SSSR", [n, i, j, k_0])
T.reads(
C[vn, vi, vj], A[vn, vi, vko * 4 : vko * 4 + 4], B[vn, vj, vko * 4 : vko * 4 + 4]
)
T.writes(C[vn, vi, vj])
A_1 = T.match_buffer(
A[vn, vi, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
B_1 = T.match_buffer(
B[vn, vj, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(C[vn, vi, vj], [], dtype="float32", offset_factor=1)
T.evaluate(
T.call_extern(
"vec4add",
C_1.data,
C_1.elem_offset,
A_1.data,
A_1.elem_offset,
B_1.data,
B_1.elem_offset,
dtype="int32",
)
)
@T.prim_func
def tensorized_batch_matmul_outer_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i_0, j_0, k in T.grid(16, 8, 8, 128):
with T.block("blockized_update"):
vn, vio, vjo, vk = T.axis.remap("SSSR", [n, i_0, j_0, k])
T.reads(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
A[vn, vio * 16 : vio * 16 + 16, vk],
B[vn, vjo * 16 : vjo * 16 + 16, vk],
)
T.writes(C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
A_1 = T.match_buffer(A[vn, vio * 16 : vio * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1)
B_1 = T.match_buffer(B[vn, vjo * 16 : vjo * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16], [16, 16], dtype="float32", offset_factor=1
)
T.evaluate(
T.call_extern("outer_product", C_1.data, C_1.elem_offset, A_1.data, A_1.elem_offset,
B_1.data, B_1.elem_offset, dtype="int32"
)
)
@T.prim_func
def annotated_mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
T.block_attr({"test_annotation": True})
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
@T.prim_func
def annotated_matmul(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
T.block_attr({"test_annotation": True})
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def annotated_tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
for i_outer, j_outer in T.grid(8, 8):
for i_inner_init, j_inner_init in T.grid(16, 16):
with T.block("init"):
vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
T.block_attr({"test_annotation": True})
C[vi_init, vj_init] = T.float32(0)
for k_outer in T.grid(8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
]
)
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
# fmt: on
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
tir.TensorIntrin.register("test_annotated_mma_intrin", annotated_mma_desc, mma_intrin)
tir.TensorIntrin.register("test_dot_product_intrin", dot_product_desc, dot_product_intrin)
tir.TensorIntrin.register("test_outer_product_intrin", outer_product_desc, outer_product_intrin)
def test_tensorize_matmul():
func = matmul
# schedule
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.decompose_reduction(update, ko)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_matmul, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_batch_matmul():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
_, i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_mma, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=batch_matmul)
def test_tensorize_dot_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, _, _, k = s.get_loops(C)
_, ki = s.split(k, factors=[None, 4])
s.tensorize(ki, "test_dot_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_dot_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_outer_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, i, j, k = s.get_loops(C)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
s.reorder(io, jo, k, ii, ji)
s.tensorize(ii, "test_outer_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_outer_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_with_annotation():
func = annotated_matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.decompose_reduction(update, ko)
s.tensorize(ii, "test_annotated_mma_intrin")
tvm.ir.assert_structural_equal(annotated_tensorized_matmul, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def get_matmul_packed(m, n, k, lhs_type, int32_lanes):
X = te.placeholder((m, k), name="X", dtype=lhs_type)
packed_W = te.placeholder((n // int32_lanes, k // 4, int32_lanes, 4), name="packedW", dtype="int8")
ak = te.reduce_axis((0, k), name="k")
matmul = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* packed_W[
tvm.tir.indexdiv(j, int32_lanes), tvm.tir.indexdiv(ak, 4), j % int32_lanes, ak % 4
].astype("int32"),
axis=ak,
),
name="compute",
)
return te.create_prim_func([X, packed_W, matmul])
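# For example, `test_tensorize_vnni` below builds a 128x128x128 matmul with
# "uint8" inputs packed for 16 int32 lanes:
#   func = get_matmul_packed(128, 128, 128, "uint8", 16)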
def test_tensorize_vnni():
m, n, k = 128, 128, 128
func = get_matmul_packed(m, n, k, "uint8", 16)
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
_, j, k = sch.get_loops(block)
_, ji = sch.split(j, factors=[None, 16])
ko, ki = sch.split(k, factors=[None, 4])
sch.reorder(ko, ji, ki)
sch.decompose_reduction(block, ko)
sch.tensorize(ji, VNNI_DOT_16x4_INTRIN)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_arm_dot():
m, n, k = 128, 128, 128
func = get_matmul_packed(m, n, k, "int8", 4)
for intrin in [ARM_DOT_4x4_i8_SDOT_INTRIN, ARM_DOT_4x4_i8_NEON_INTRIN]:
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
_, j, k = sch.get_loops(block)
_, ji = sch.split(j, factors=[None, 4])
ko, ki = sch.split(k, factors=[None, 4])
sch.reorder(ko, ji, ki)
sch.decompose_reduction(block, ko)
sch.tensorize(ji, intrin)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_dpa4():
m, n, k = 128, 128, 128
X = te.placeholder((m, k), name="X", dtype="int8")
W = te.placeholder((n, k), name="W", dtype="int8")
ak = te.reduce_axis((0, k), name="k")
matmul = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* W[j, ak].astype("int32"),
axis=ak,
),
name="compute",
)
func = te.create_prim_func([X, W, matmul])
for intrin in [AMDGPU_SDOT4_INTRIN, DP4A_INTRIN]:
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
i, j, k = sch.get_loops(block)
by, ty, yi = sch.split(i, factors=sch.sample_perfect_tile(i, n=3))
bx, tx, xi = sch.split(j, factors=sch.sample_perfect_tile(j, n=3))
ko, ki = sch.split(k, [None, 4])
ko, kt = sch.split(ko, factors=sch.sample_perfect_tile(ko, n=2))
sch.reorder(by, bx, ty, tx, yi, xi)
CC = sch.cache_write(block, 0, "local")
sch.reverse_compute_at(CC, tx)
def fetch_to_shared(block, idx):
block_read = sch.cache_read(block, idx, "shared")
sch.compute_at(block_read, ko, True)
return block_read
fetch_to_shared(block, 0)
fetch_to_shared(block, 1)
sch.decompose_reduction(block, ko)
sch.tensorize(ki, intrin)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensor_intrin_look_up():
intrin_name = 'non_existent_intrin'
assert tir.TensorIntrin.get(intrin_name, allow_missing=True) is None
with pytest.raises(ValueError):
tir.TensorIntrin.get(intrin_name)
def test_tensorize_matmul_mixed_dtype():
# fmt: off
@T.prim_func
def matmul_int64_shape(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
C: T.Buffer[(T.int64(128), T.int64(128)), "float32"]
) -> None:
for i_0, j_0 in T.grid(T.int64(8), T.int64(8)):
for i_1_init, j_1_init in T.grid(T.int64(16), T.int64(16)):
with T.block("init"):
vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1_init)
vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1_init)
C[vi, vj] = T.float32(0)
for k_0, i_1, j_1, k_1 in T.grid(T.int64(8), T.int64(16), T.int64(16), T.int64(16)):
with T.block("update"):
vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1)
vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1)
vk = T.axis.reduce(T.int64(128), k_0 * T.int64(16) + k_1)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def tensorized_matmul_int64_shape(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
C: T.Buffer[(T.int64(128), T.int64(128)), "float32"]
) -> None:
for i_outer, j_outer in T.grid(T.int64(8), T.int64(8)):
for i_inner_init, j_inner_init in T.grid(T.int64(16), T.int64(16)):
with T.block("init"):
vi = T.axis.spatial(T.int64(128), i_outer * T.int64(16) + i_inner_init)
vj = T.axis.spatial(T.int64(128), j_outer * T.int64(16) + j_inner_init)
C[vi, vj] = T.float32(0)
for k_outer in T.grid(T.int64(8)):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
]
)
T.writes(C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
# fmt: on
s = tir.Schedule(matmul_int64_shape, debug_mask="all")
update = s.get_block("update")
ii = s.get_loops(update)[-3]
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(s.mod["main"], tensorized_matmul_int64_shape)
verify_trace_roundtrip(sch=s, mod=matmul_int64_shape)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_tensorize_ldmatrix_mma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
from tvm import te
from tvm.tir.tensor_intrin.cuda import (
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_32x16_B_INTRIN,
LDMATRIX_16x32_B_TRANS_INTRIN,
MMA_f16f16f32_INTRIN,
MMA_f16f16f32_TRANS_INTRIN,
MMA_f16f16f16_INTRIN,
MMA_f16f16f16_TRANS_INTRIN,
MMA_i8i8i32_INTRIN,
MMA_i8i8i32_TRANS_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
shared_16x16_to_ldmatrix_32x8_layout,
shared_32x16_to_ldmatrix_32x16_layout,
shared_16x32_to_ldmatrix_32x16_layout,
)
import tvm.testing
import numpy as np
from tvm.testing.tir import mma_schedule
M = 4096
N = 4096
K = 4096
measure_perf = False
gflops = (N * M * K) * 2 / 1e9
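# Each of the M*N*K multiply-accumulates counts as two ops, hence the factor
# of 2; dividing by a measured mean runtime in seconds (see the timer lambdas
# below) yields GFLOPS (or GOPS for the int8 tests).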
def matmul(m, n, k, in_dtype, out_dtype, b_transposed):
b_shape = (n, k) if b_transposed else (k, n)
a = te.placeholder((m, k), name="A", dtype=in_dtype)
b = te.placeholder(b_shape, name="B", dtype=in_dtype)
k = te.reduce_axis((0, k), name="k")
def maybe_cast(v):
if in_dtype != out_dtype:
return tvm.tir.Cast(out_dtype, v)
return v
def maybe_swap(i, j):
if b_transposed:
return j, i
return i, j
c = te.compute(
(m, n),
lambda i, j: te.sum(maybe_cast(a[i, k]) * maybe_cast(b[maybe_swap(k, j)]), axis=[k]),
name="C",
)
return (a, b, c)
def run_test(
k_inner,
in_dtype,
out_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
):
sch = mma_schedule(
te.create_prim_func(matmul(M, N, K, in_dtype, out_dtype, b_transposed)),
k_inner,
in_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
)
f = tvm.build(sch.mod["main"], target="cuda", name="dense")
dev = tvm.device("cuda", 0)
if in_dtype == "float16":
a_np = np.random.uniform(size=(M, K)).astype("float16")
if b_transposed:
b_np = np.random.uniform(size=(N, K)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype(
out_dtype
)
else:
b_np = np.random.uniform(size=(K, N)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype(out_dtype)
else:
a_np = np.random.randint(-128, 128, (M, K)).astype("int8")
if b_transposed:
b_np = np.random.randint(-128, 128, (N, K)).astype("int8")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype(
"int32"
)
else:
b_np = np.random.randint(-128, 128, (K, N)).astype("int8")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype("int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((M, N), dtype=out_dtype), dev)
f(a, b, c)
if out_dtype != "float16":
# The numpy reference is computed with fp32 precision (otherwise too slow).
        # So there is a non-trivial accuracy difference if the TVM result is computed with fp16 accumulation.
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
return lambda: f.time_evaluator(f.entry_name, dev, number=500)(a, b, c)
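# run_test returns a thunk around time_evaluator so that callers only pay the
# benchmarking cost when measure_perf is enabled; timer().mean is the average
# runtime in seconds over number=500 runs.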
@tvm.testing.requires_cuda_compute_version(8)
def test_f16f16f32_m16n16k16():
def index_map(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
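    # The map tiles (i, j) into 16x16 fragments, then applies the ldmatrix
    # register layout: each 16x16 tile is spread across 32 threads holding 8
    # elements each, which is what shared_16x16_to_ldmatrix_32x8_layout
    # encodes.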
k_inner = 16
in_dtype = "float16"
out_dtype = "float32"
i_factors, j_factors, k_factors = [4, 8, 2, 4, 1], [1, 64, 2, 1, 2], [128, 2, 1]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False, # b_transposed
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
MMA_f16f16f32_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f32_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True, # b_transposed
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
MMA_f16f16f32_TRANS_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f32_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_cuda_compute_version(8)
def test_f16f16f16_m16n16k16():
def index_map(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
k_inner = 16
in_dtype = "float16"
out_dtype = "float16"
i_factors, j_factors, k_factors = [16, 2, 1, 4, 2], [16, 2, 2, 1, 4], [128, 2, 1]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False, # b_transposed
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
MMA_f16f16f16_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f16_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True, # b_transposed
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
MMA_f16f16f16_TRANS_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f16_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_cuda_compute_version(8)
def test_i8i8i32_m16n16k32():
def index_map_A(i, j):
return (
i // 16,
j // 32,
*shared_16x32_to_ldmatrix_32x16_layout(i % 16, j % 32),
)
def index_map_B(i, j):
return (
i // 32,
j // 16,
*shared_32x16_to_ldmatrix_32x16_layout(i % 32, j % 16),
)
def index_map_C(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
k_inner = 32
in_dtype = "int8"
out_dtype = "int32"
i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [8, 4, 4, 2, 1], [32, 2, 2]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False, # b_transposed
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_32x16_B_INTRIN,
MMA_i8i8i32_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
)
if measure_perf and timer:
print("i8i8i32_m16n16k32: %f GOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True, # b_transposed
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_A,
index_map_C,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_16x32_B_TRANS_INTRIN,
MMA_i8i8i32_TRANS_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
)
if measure_perf and timer:
print("i8i8i32_m16n16k32_trans: %f GOPS" % (gflops / (timer().mean)))
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_trace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
# mypy: ignore-errors
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule import BlockRV, Instruction, InstructionKind, LoopRV, Trace
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
# pylint: enable=no-member,invalid-name,unused-variable
def _make_get_block(name, output):
return Instruction(
kind=InstructionKind.get("GetBlock"),
inputs=[],
attrs=[name, "main"],
outputs=[output],
)
def _make_get_loops(input, outputs): # pylint: disable=redefined-builtin
return Instruction(
kind=InstructionKind.get("GetLoops"),
inputs=[input],
attrs=[],
outputs=outputs,
)
def _make_compute_inline(input): # pylint: disable=redefined-builtin
return Instruction(
kind=InstructionKind.get("ComputeInline"),
inputs=[input],
attrs=[],
outputs=[],
)
def _make_split(inputs, outputs): # pylint: disable=redefined-builtin
return Instruction(
kind=InstructionKind.get("Split"),
inputs=inputs,
attrs=[True],
outputs=outputs,
)
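# Note: the lone attr [True] in _make_split corresponds to Split's
# preserve_unit_iters flag, which is visible in the printed trace in
# test_trace_simplified_3 below.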
def _make_enter_postproc():
return Instruction(
kind=InstructionKind.get("EnterPostproc"),
inputs=[],
attrs=[],
outputs=[],
)
def _make_annotate(block: BlockRV, annotation: str):
return Instruction(
kind=InstructionKind.get("Annotate"),
inputs=[block, annotation],
attrs=["meta_schedule.auto_tensorize"],
outputs=[],
)
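# Each helper above assembles a single trace Instruction: `inputs` are the
# random variables the instruction consumes, `attrs` are its compile-time
# arguments, and `outputs` are the random variables it defines.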
def _make_trace_1(b0, l1, l2): # pylint: disable=invalid-name
return Trace(
insts=[
_make_get_block(name="block", output=b0),
_make_get_loops(input=b0, outputs=[l1, l2]),
],
decisions={},
)
def _make_trace_2(b0): # pylint: disable=invalid-name
return Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
],
decisions={},
)
def _make_trace_3(b0, b1, add_postproc): # pylint: disable=invalid-name
if add_postproc:
insts = [
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
_make_get_block(name="C", output=b1),
_make_enter_postproc(),
_make_compute_inline(input=b1),
]
else:
insts = [
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
_make_get_block(name="C", output=b1),
]
return Trace(insts=insts, decisions={})
def _make_trace_4(b0, l1, l2, l3): # pylint: disable=invalid-name
return Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_get_loops(input=b0, outputs=[l1]),
_make_split([l1, None, 32], [l2, l3]),
],
decisions={},
)
def test_trace_construct_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="block", func_name="main")',
" l1, l2 = sch.get_loops(block=b0)",
)
)
assert len(trace.insts) == 2
assert len(trace.decisions) == 0
def test_trace_construct_get_decision_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
assert trace.get_decision(trace.insts[0]) is None
assert trace.get_decision(trace.insts[1]) is None
def test_trace_construct_append_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
trace.append(inst=_make_get_block("block2", BlockRV()))
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="block", func_name="main")',
" l1, l2 = sch.get_loops(block=b0)",
' b3 = sch.get_block(name="block2", func_name="main")',
)
)
def test_trace_construct_pop_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
last_inst = trace.insts[-1]
assert trace.pop().same_as(last_inst)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="block", func_name="main")',
)
)
def test_trace_construct_pop_2():
trace = Trace([], {})
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
" pass",
)
)
assert trace.pop() is None
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
" pass",
)
)
def test_trace_apply_to_schedule():
trace = _make_trace_2(BlockRV())
sch = tir.Schedule(elementwise, debug_mask="all")
trace.apply_to_schedule(sch, remove_postproc=False, decision_provider=None)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
def test_trace_as_json_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
obj = trace.as_json()
assert obj == [
[
["GetBlock", [], ["block", "main"], ["b0"]],
["GetLoops", ["b0"], [], ["l1", "l2"]],
],
[],
]
def test_trace_simplified_1():
trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
trace = trace.simplified(remove_postproc=True)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
)
)
def test_trace_simplified_2():
trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
trace = trace.simplified(remove_postproc=False)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
def test_trace_simplified_3():
trace = _make_trace_4(BlockRV(), LoopRV(), LoopRV(), LoopRV()).simplified(remove_postproc=False)
assert str(trace) == "\n".join(
(
"# from tvm import tir",
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" l1, = sch.get_loops(block=b0)",
" l2, l3 = sch.split(loop=l1, factors=[None, 32], preserve_unit_iters=True)",
)
)
def test_apply_json_to_schedule_1():
trace = _make_trace_2(BlockRV())
json_obj = trace.as_json()
sch = tir.Schedule(elementwise, debug_mask="all")
Trace.apply_json_to_schedule(json_obj, sch)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
def _test_apply_annotation_trace_from_json(annotation: str):
"""Test applying an annotation works without crashing.
Designed to handle some previously failing edge cases like the
empty string.
"""
b0 = BlockRV()
trace = Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_annotate(block=b0, annotation=annotation),
],
decisions={},
)
json_obj = trace.as_json()
sch = tir.Schedule(elementwise, debug_mask="all")
Trace.apply_json_to_schedule(json_obj, sch)
@T.prim_func
def elementwise_expected(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.block_attr({"meta_schedule.auto_tensorize": annotation})
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
tvm.ir.assert_structural_equal(elementwise_expected, sch.mod["main"])
def test_apply_annotation_from_json():
# Something reasonable
_test_apply_annotation_trace_from_json("SSRSSR")
# The empty string
_test_apply_annotation_trace_from_json("")
# A string of two quotation marks
_test_apply_annotation_trace_from_json('""')
# A string of one quotation mark
_test_apply_annotation_trace_from_json('"')
if __name__ == "__main__":
    tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
from tvm.tir import Schedule
from tvm.tir.schedule.transform import tile_with_tensor_intrin
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN
@tvm.script.ir_module
class DenseVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
for i0, i1, i2 in T.grid(1024, 1024, 1024):
with T.block("compute"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class DenseVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
for i0, i1_0, i2_0, i1_1, i2_1 in T.grid(1024, 64, 256, 16, 4):
with T.block("compute"):
i = T.axis.spatial(1024, i0)
j = T.axis.spatial(1024, i1_0 * 16 + i1_1)
k = T.axis.reduce(1024, i2_0 * 4 + i2_1)
T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
with T.block("conv2d_NCHWc_int8"):
(
n,
oc_chunk,
oh,
ow,
oc_block,
kh,
kw,
ic_outer,
ic_f_inner,
ic_s_inner,
) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
for i0, i1, i2, i3, i4_0, i5, i6, i7, i8, i9_0, i4_1, i9_1 in T.grid(
1, 16, 56, 56, 1, 1, 1, 4, 4, 1, 16, 4
):
with T.block("conv2d_NCHWc_int8"):
n, oc_chunk, oh, ow = T.axis.remap("SSSS", [i0, i1, i2, i3])
oc_block = T.axis.spatial(16, i4_0 * 16 + i4_1)
kh, kw, ic_outer, ic_f_inner = T.axis.remap("RRRR", [i5, i6, i7, i8])
ic_s_inner = T.axis.reduce(4, i9_0 * 4 + i9_1)
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
def test_tile_with_tensor_intrin_dense_vnni():
s = Schedule(DenseVNNIModule)
block = s.get_block("compute")
tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)
_, _, _, i1_1, _ = s.get_loops(block)
assert s.get(tiled_loop) == s.get(i1_1)
tvm.ir.assert_structural_equal(s.mod, DenseVNNIModuleTiled)
def test_tile_with_tensor_intrin_conv2d_nchwc_vnni():
s = Schedule(Conv2dNCHWcVNNIModule)
block = s.get_block("conv2d_NCHWc_int8")
tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)
tiled_loops = s.get_loops(block)
assert len(tiled_loops) == 12
assert s.get(tiled_loop) == s.get(tiled_loops[-2])
tvm.ir.assert_structural_equal(s.mod, Conv2dNCHWcVNNIModuleTiled)
if __name__ == "__main__":
test_tile_with_tensor_intrin_dense_vnni()
test_tile_with_tensor_intrin_conv2d_nchwc_vnni()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_transform_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
def packed_index_map_func(m, n):
return m // 16, n // 16, m % 16, n % 16
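# Worked example: packed_index_map_func(33, 18) == (2, 1, 1, 2), i.e. element
# (33, 18) of the 128x128 buffer lands in tile (2, 1) at intra-tile offset
# (1, 2) after the transform.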
@T.prim_func
def two_elementwise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def two_elementwise_transformed_intermediate_buffer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((8, 8, 16, 16), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi // 16, vj // 16, vi % 16, vj % 16] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi // 16, vj // 16, vi % 16, vj % 16] + 1.0
@T.prim_func
def two_elementwise_transformed_input_buffer(
A: T.Buffer[(8, 8, 16, 16), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi // 16, vj // 16, vi % 16, vj % 16] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def two_elementwise_transformed_output_buffer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 8, 16, 16), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi // 16, vj // 16, vi % 16, vj % 16] = B[vi, vj] + 1.0
@T.prim_func
def elementwise(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def elementwise_transformed(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
for i in range(16384):
with T.block("B"):
vi = T.axis.remap("S", [i])
B[vi // 128, vi % 128] = A[vi // 128, vi % 128] * 2.0
@T.prim_func
def conv2d_nhwc(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
T.float32(0),
dtype="float32",
)
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
with T.block("conv2d_nhwc"):
n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
Conv2d_nhwc[n, h, w, co] = T.float32(0)
Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
* Weight[rh, rw, rc, co]
)
@T.prim_func
def conv2d_nhwc_transformed(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
T.float32(0),
dtype="float32",
)
for ax0, ax1, ax2 in T.grid(12544, 64, 147):
with T.block("conv2d_nhwc"):
v0, v1, v2 = T.axis.remap("SSR", [ax0, ax1, ax2])
T.reads(PadInput[v0 // 12544, v0 // 112 * 2 + v2 // 21, v0 % 112 * 2 + v2 % 21 // 3, v2 % 3], Weight[v2 // 21, v2 % 21 // 3, v2 % 3, v1])
T.writes(Conv2d_nhwc[v0 // 12544, v0 // 112, v0 % 112, v1])
with T.init():
Conv2d_nhwc[v0 // 12544, v0 // 112, v0 % 112, v1] = T.float32(0)
Conv2d_nhwc[v0 // 12544, v0 // 112, v0 % 112, v1] = Conv2d_nhwc[v0 // 12544, v0 // 112, v0 % 112, v1] + PadInput[v0 // 12544, v0 // 112 * 2 + v2 // 21, v0 % 112 * 2 + v2 % 21 // 3, v2 % 3] * Weight[v2 // 21, v2 % 21 // 3, v2 % 3, v1]
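# In the transformed function above the fused axes decode as
# v0 = n * 12544 + h * 112 + w (12544 == 112 * 112) and
# v2 = rh * 21 + rw * 3 + rc (21 == 7 * 3), which matches the index
# arithmetic in the reads/writes.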
@T.prim_func
def two_elementwise_unit_dim(A: T.Buffer[(1, 128), "float32"], C: T.Buffer[(1, 128), "float32"]) -> None:
B = T.alloc_buffer((1, 128), "float32")
for i, j in T.grid(1, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# fmt: on
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_two_elementwise_transform_intermediate_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
block="B",
buffer="B",
index_map=packed_index_map_func,
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("write", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_intermediate_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_transform_input_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
index_map=packed_index_map_func,
block="B",
buffer="A",
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("read", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_input_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_transform_output_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
index_map=packed_index_map_func,
block="C",
buffer="C",
)
else:
block = sch.get_block("C")
sch.transform_layout(block, ("write", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_output_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_unit_dim(use_block_name):
sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
index_map = lambda i, j: (i, j)
if use_block_name:
sch.transform_layout(
index_map=index_map,
block="B",
buffer="B",
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("write", 0), index_map)
tvm.ir.assert_structural_equal(two_elementwise_unit_dim, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_simplify():
sch = tir.Schedule(two_elementwise, debug_mask="all")
i, j = sch.get_loops(sch.get_block("C"))
i, i_inner = sch.split(i, factors=[None, 16])
j, j_inner = sch.split(j, factors=[None, 16])
sch.reorder(
i,
j,
i_inner,
j_inner,
)
block_outer = sch.blockize(i_inner)
B = sch.cache_read(block_outer, 0, "global")
sch.transform_layout(B, ("write", 0), lambda i, j: (i // 16, j // 16, i % 16, j % 16))
@T.prim_func
def ref(B: T.Buffer[(8, 8, 16, 16), "float32"], C: T.Buffer[(128, 128), "float32"]):
for i_0, j_0 in T.grid(8, 8):
with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o, vj_o, 0:16, 0:16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o, vj_o, vi, vj])
T.writes(C[vi_o * 16 + vi, vj_o * 16 + vj])
C[vi_o * 16 + vi, vj_o * 16 + vj] = B[vi_o, vj_o, vi, vj] + T.float32(1)
# Without simplification
# T.reads(B[vi // 16 + vi_o, vj // 16 + vj_o, vi % 16, vj % 16])
# C[...] = B[vi // 16 + vi_o, vj // 16 + vj_o, vi % 16, vj % 16] + T.float32(1)
tvm.ir.assert_structural_equal(ref.body.block.body, sch.get(sch.get_loops(block_outer)[0]))
def test_var_args_sugar():
@T.prim_func
def summation_3d(
A: T.Buffer[(1024, 1024, 32), "float32"], B: T.Buffer[(1,), "float32"]
) -> None:
B[0] = 0
for i, j, k in T.grid(1024, 1024, 32):
with T.block("compute"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[0] = B[0] + A[vi, vj, vk]
@T.prim_func
def summation_3d_split(
A: T.Buffer[(1024, 1024, 8, 4), "float32"], B: T.Buffer[(1,), "float32"]
) -> None:
B[0] = 0
for i, j, k in T.grid(1024, 1024, 32):
with T.block("compute"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[0] = B[0] + A[vi, vj, vk // 4, vk % 4]
sch = tir.Schedule(summation_3d, debug_mask="all")
sch.transform_layout(
index_map=lambda *indices, k: [*indices, k // 4, k % 4], block="compute", buffer="A"
)
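    # The keyword-only `k` after `*indices` binds to the last buffer axis, so
    # the same index_map works for any leading rank: here (1024, 1024, 32)
    # becomes (1024, 1024, 8, 4).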
tvm.ir.assert_structural_equal(summation_3d_split, sch.mod["main"])
def test_transform_block_layout_basic(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (i * 128 + j,))
tvm.ir.assert_structural_equal(elementwise_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_transform_block_layout_conv2d_nhwc(use_block_name):
sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
sch.transform_block_layout(
block,
lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 + h * 112 + w, co, rh * 7 * 3 + rw * 3 + rc),
)
tvm.ir.assert_structural_equal(conv2d_nhwc_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_transform_block_layout_unit_dim(use_block_name):
sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (j, i))
@T.prim_func
def two_elementwise_unit_dim_transformed(
A: T.Buffer[(1, 128), "float32"], C: T.Buffer[(1, 128), "float32"]
) -> None:
B = T.alloc_buffer((1, 128), "float32")
for j, i in T.grid(128, 1):
with T.block("B"):
vj, vi = T.axis.remap("SS", [j, i])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
tvm.ir.assert_structural_equal(two_elementwise_unit_dim_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_transform_block_layout_fail_non_affine(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tir.ScheduleError):
sch.transform_block_layout(block, lambda i, j: (i + j,))
def test_transform_block_layout_fail_mixed_iter_type(use_block_name):
sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
with pytest.raises(tir.ScheduleError):
sch.transform_block_layout(
block,
lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 + h * 112 + w, co * 7 + rh, rw * 3 + rc),
)
def test_transform_block_layout_int64_extent(use_block_name):
@T.prim_func
def elementwise_int64_extent(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def elementwise_int64_extent_transformed(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i in range(T.int64(16384)):
with T.block("B"):
vi = T.axis.remap("S", [i])
B[vi // T.int64(128), vi % T.int64(128)] = (
A[vi // T.int64(128), vi % T.int64(128)] * 2.0
)
sch = tir.Schedule(elementwise_int64_extent, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (i * 128 + j,))
tvm.ir.assert_structural_equal(elementwise_int64_extent_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_int64_extent)
class BasePaddingCompare(tvm.testing.CompareBeforeAfter):
pad_value = tvm.testing.parameter(None)
transformed_buffer = tvm.testing.parameter("A")
@pytest.fixture
def transform(self, pad_value, transformed_buffer):
def transform(mod):
sch = tir.Schedule(mod)
sch.transform_layout(
"block", transformed_buffer, lambda i: [i // 4, i % 4], pad_value=pad_value
)
return sch.mod
return transform
class TestNoPadding(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value."""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi // 4, vi % 4] = 0
class TestNoPaddingMultipleUsage(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value.
Like TestNoPadding, but the buffer A shows up in multiple
locations. To remain internally consistent, all instances of the
buffer should be rewritten.
"""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
B = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("other"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi // 4, vi % 4] = 0
B = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("other"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi // 4, vi % 4]
class TestNoPaddingOpaqueBlock(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value.
Like TestNoPadding, but buffer access is done in an opaque block.
"""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
A[i] = 0
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
A[i // 4, i % 4] = 0
class TestErrorIfPaddingForbidden(BasePaddingCompare):
"""Unless padding is explicitly enabled, should raise error"""
def before():
A = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
expected = tvm.tir.schedule.schedule.ScheduleError
class TestErrorOnWrongPaddingType(BasePaddingCompare):
"""The padding must have the same dtype as the buffer"""
pad_value = tvm.testing.parameter(tir.IntImm("int8", 0))
def before():
A = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
expected = tvm.tir.schedule.schedule.ScheduleError
class TestPaddedTransformIfThenElse(BasePaddingCompare):
"""Use if_then_else to represent padding, if possible.
For a block that is a producer of the pre-transformation buffer,
which visits all indices according to a row-major traversal, and
which has no effect other than producing the transformed buffer,
transform the loop iterators to be a row-major traversal of the
post-transformation buffer, with padding represented by
`T.if_then_else`.
"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
dtype = tvm.testing.parameter("int32", "int8")
@tvm.testing.fixture
def before(self, dtype):
@T.prim_func
def func(A: T.Buffer[14, dtype]):
B = T.alloc_buffer(14, dtype)
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
return func
@tvm.testing.fixture
def expected(self, dtype, pad_value):
pad_value = tir.IntImm(dtype, pad_value)
@T.prim_func
def func(A: T.Buffer[14, dtype]):
B = T.alloc_buffer([4, 4], dtype)
for i, j in T.grid(4, 4):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = T.if_then_else(
vi == 3 and 2 <= vj, pad_value, A[vi * 4 + vj], dtype=dtype
)
return func
class TestPaddedTransformWithoutLoop(BasePaddingCompare):
"""Handle padded writes without a loop
The statement being replaced may be something other than a
for-loop, such as if a loop has already been unrolled.
"""
pad_value = tvm.testing.parameter(0)
def before(A: T.Buffer[14, "int32"]):
with T.block("root"):
T.reads()
T.writes()
with T.block("block"):
A[0] = 0
def expected(A: T.Buffer[(4, 4), "int32"]):
with T.block("block"):
A[0, 0] = 0
for i, j in T.grid(4, 4):
with T.block("buffer_A_padding"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i == 3 and 2 <= j)
A[vi, vj] = 0
class TestPaddedTransformIfThenElseReduction(BasePaddingCompare):
"""Like TestPaddedTransformIfThenElse, but with a reduction axis"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer(14, "int32")
for i, k in T.grid(14, 32):
with T.block("block"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0
B[vi] = B[vi] + A[vi, vk]
def expected(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer([4, 4], "int32")
for i, j, k in T.grid(4, 4, 32):
with T.block("block"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
B[vi, vj] = T.if_then_else(vi == 3 and 2 <= vj, 0, 0, dtype="int32")
B[vi, vj] = T.if_then_else(
vi == 3 and 2 <= vj, 0, B[vi, vj] + A[vi * 4 + vj, vk], dtype="int32"
)
class TestPaddedTransformIfThenElseReductionOpaque(BasePaddingCompare):
"""Like TestPaddedTransformIfThenElseReduction, but with opaque blocks"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer(14, "int32")
for i in T.serial(14):
B[i] = 0
for k in T.serial(32):
with T.block("block"):
B[i] = B[i] + A[i, k]
def expected(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer([4, 4], "int32")
for i, j in T.grid(4, 4):
B[i, j] = T.if_then_else(i == 3 and 2 <= j, 0, 0, dtype="int32")
for k in T.serial(32):
with T.block("block"):
B[i, j] = T.if_then_else(
i == 3 and 2 <= j, 0, B[i, j] + A[i * 4 + j, k], dtype="int32"
)
class TestPaddedTransformPostProcIfRequiredDueToSideEffects(BasePaddingCompare):
"""Set the transformation padding in a post-processing block.
Like TestPaddedTransformIfThenElse, but the block that produces B
also has the effect of setting `C`.
"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer(14, "int32")
C = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
C[vi] = 0
def expected(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer([4, 4], "int32")
C = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi // 4, vi % 4] = A[vi]
C[vi] = 0
for i, j in T.grid(4, 4):
with T.block("block_pad_B"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i == 3 and 2 <= j)
B[vi, vj] = 0
class TestPaddedTransformOfInputCreatesAssumption(BasePaddingCompare):
"""Transformation of an input buffer places T.assume locally"""
pad_value = tvm.testing.parameter(42)
def before(A: T.Buffer[14, "int32"], B: T.Buffer[14, "int32"]):
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected(A: T.Buffer[(4, 4), "int32"], B: T.Buffer[14, "int32"]):
for i, j in T.grid(4, 4):
with T.block("buffer_A_assumption"):
vi, vj = T.axis.remap("SS", [i, j])
T.evaluate(T.assume(not (vi == 3 and 2 <= vj) or A[vi, vj] == 42))
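                # `not P or Q` encodes the implication P => Q: every padded
                # element (vi == 3 and vj >= 2) is assumed to equal the pad
                # value 42.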
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi // 4, vi % 4]
class TestPaddedTransformNonConstantValue(tvm.testing.CompareBeforeAfter):
"""Allow an expression to specify the pad value.
Like TestPaddedTransformIfThenElse, but the pad value depends on
the indices.
"""
@pytest.fixture
def transform(self):
def transform(mod):
sch = tir.Schedule(mod)
sch.transform_layout(
"block",
"B",
lambda i: [i // 4, i % 4],
pad_value=lambda i, j: i + j,
)
return sch.mod
return transform
def before(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer([4, 4], "int32")
for i, j in T.grid(4, 4):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = T.if_then_else(
vi == 3 and 2 <= vj, vi + vj, A[vi * 4 + vj], dtype="int32"
)
@pytest.mark.xfail(reason="Not yet implemented")
class TestPaddedTransformRepeatedBufferElement(tvm.testing.CompareBeforeAfter):
"""Allow an expression to specify the pad value.
Like TestPaddedTransformOfInputCreatesAssumption, but the pad
value depends on another portion of the buffer. In this case, the
padding at the end of A contains repeated elements from the
beginning of A.
"""
@pytest.fixture
def transform(self):
def transform(mod):
sch = tir.Schedule(mod)
A = sch.get(sch.get_block("block")).reads[0].buffer
sch.transform_layout(
"block",
"A",
lambda i: [i // 4, i % 4],
pad_value=lambda i, j: A[(4 * i + j) % 14],
)
return sch.mod
return transform
def before(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected(A: T.Buffer[(4, 4), "int32"]):
for i, j in T.grid(4, 4):
with T.block("buffer_A_assumption"):
vi, vj = T.axis.remap("SS", [i, j])
T.evaluate(
T.assume(
not (vi == 3 and 2 <= vj)
                        or A[vi, vj] == A[((4 * vi + vj) % 14) // 4, ((4 * vi + vj) % 14) % 4]
)
)
B = T.alloc_buffer(14, "int32")
for i in T.grid(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi // 4, vi % 4]
class TestPadValueMayNotReferenceOtherBuffer(tvm.testing.CompareBeforeAfter):
"""Allow an expression to specify the pad value.
Like TestPaddedTransformRepeatedBufferElement, but the pad value depends on
a different buffer, which is not allowed.
"""
@pytest.fixture
def transform(self):
def transform(mod):
sch = tir.Schedule(mod)
A = sch.get(sch.get_block("block")).reads[0].buffer
other = tir.decl_buffer(1, A.dtype, name="other")
sch.transform_layout(
"block",
"A",
lambda i: [i // 4, i % 4],
pad_value=lambda i, j: other[0],
)
return sch.mod
return transform
def before(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
expected = tvm.tir.schedule.schedule.ScheduleError
class TestTransformLayoutWithVar(tvm.testing.CompareBeforeAfter):
"""Layout transform with dynamic parameter in transform"""
@pytest.fixture
def transform(self):
def transform(mod):
sch = tir.Schedule(mod)
n = sch.mod["main"].params[1]
sch.transform_layout(
"block",
"B",
lambda i: [i // n, i % n],
pad_value=0,
)
return sch.mod
return transform
def before(A: T.Buffer[16, "int32"], n: T.int32):
B = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected(A: T.Buffer[16, "int32"], n: T.int32):
B = T.alloc_buffer([(-16 % n + 16) // n, n], dtype="int32")
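        # With floormod semantics (-16 % n is non-negative for positive n),
        # (-16 % n + 16) // n is just ceildiv(16, n): the number of rows after
        # padding 16 elements up to a multiple of n.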
for i, j in T.grid((-16 % n + 16) // n, n):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = T.if_then_else(
# Checks if the transform introduced padding
-16 % n != 0
and (
# If so, is vi in the last group (which may
# include padding).
(vj + vi * n) // n == 16 // n
# And is vj within the padding
and 16 % n <= (vj + vi * n) % n
),
0,
A[vj + vi * n],
dtype="int32",
)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_schedule_utilities.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = 0.0
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_relu(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (1024, 1024))
B = T.match_buffer(b, (1024, 1024))
C = T.alloc_buffer((1024, 1024))
D = T.match_buffer(d, (1024, 1024))
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(1024, 1024):
with T.block("relu"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.max(C[vi, vj], 0.0)
@T.prim_func
def matmul_relu_ann1(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (1024, 1024))
B = T.match_buffer(b, (1024, 1024))
C = T.alloc_buffer((1024, 1024))
D = T.match_buffer(d, (1024, 1024))
for i in T.serial(0, 1024, annotations={"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}}):
for j in T.serial(0, 1024, annotations={"test2": 612, "test3": ["aa", 1]}):
for k in T.serial(0, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(1024, 1024):
with T.block("relu"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.max(C[vi, vj], 0.0)
@T.prim_func
def matmul_relu_ann2(a: T.handle, b: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (1024, 1024))
B = T.match_buffer(b, (1024, 1024))
C = T.alloc_buffer((1024, 1024))
D = T.match_buffer(d, (1024, 1024))
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
T.block_attr({"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}})
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
for i, j in T.grid(1024, 1024):
with T.block("relu"):
vi, vj = T.axis.remap("SS", [i, j])
T.block_attr({"test2": 0.22, "test3": ["aa", 1]})
D[vi, vj] = T.max(C[vi, vj], 0.0)
@tvm.script.ir_module
class ModuleWithMultipleFuncs:
@T.prim_func
def vector_add(
A: T.Buffer[128, "float32"],
B: T.Buffer[128, "float32"],
) -> None:
for i in range(128):
with T.block("init"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
@T.prim_func
def vector_add_2(
A: T.Buffer[128, "float32"],
B: T.Buffer[128, "float32"],
) -> None:
for i in range(128):
with T.block("init"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
@T.prim_func
def tuple_reduction(data: T.Buffer[(4, 32), "float32"], T_add: T.Buffer[(4,), "float32"]) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
with T.block("root"):
T.reads()
T.writes()
data_red_temp_v0 = T.alloc_buffer([4], dtype="float32")
data_red_temp_v1 = T.alloc_buffer([4], dtype="float32")
for i0, i1 in T.grid(4, 32):
with T.block("data_red_temp"):
ax0, k1 = T.axis.remap("SR", [i0, i1])
T.reads(data[ax0, k1])
T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
with T.init():
data_red_temp_v0[ax0] = T.float32(0)
data_red_temp_v1[ax0] = T.float32(0)
v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1]
v_data_red_temp_v1: T.float32 = (
data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1]
)
data_red_temp_v0[ax0] = v_data_red_temp_v0
data_red_temp_v1[ax0] = v_data_red_temp_v1
for i0 in range(4):
with T.block("T_add"):
ax0 = T.axis.remap("S", [i0])
T.reads(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
T.writes(T_add[ax0])
T_add[ax0] = data_red_temp_v0[ax0] + data_red_temp_v1[ax0]
# pylint: enable=no-member,invalid-name,unused-variable
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_tir_schedule_creation():
# Tests:
# - Schedule.__init__ for PrimFunc and IRModule
# - Schedule.mod
# - Schedule.state
sch_1 = tir.Schedule(matmul, debug_mask="all")
sch_2 = tir.Schedule(IRModule({"main": matmul}), debug_mask="all")
assert sch_1.mod["main"].same_as(sch_2.mod["main"])
assert sch_1.state.mod["main"].same_as(sch_2.state.mod["main"])
def test_tir_schedule_get_block():
# Tests:
# - Schedule.get_block
# - Schedule.get_sref
# - Schedule.get
sch = tir.Schedule(matmul, debug_mask="all")
block_rv = sch.get_block(name="update")
block_sref = sch.get_sref(block_rv)
block = sch.get(block_rv)
assert block.name_hint == "update"
assert block_sref.stmt.same_as(block)
assert sch.state.get_sref(block).same_as(block_sref)
assert block.same_as(matmul.body.block.body.body.body[1].body.block)
def test_tir_schedule_work_on():
sch = tir.Schedule(ModuleWithMultipleFuncs, debug_mask="all")
with pytest.raises(ValueError, match="does not know which function to be working on"):
sch.get_block(name="init")
sch.work_on(func_name="vector_add")
sch.get_block(name="init")
def test_tir_schedule_get_loops(use_block_name):
# Tests:
# - Schedule.get_loops
# - Schedule.get
sch = tir.Schedule(matmul, debug_mask="all")
block = "update" if use_block_name else sch.get_block(name="update")
i, j, k = sch.get_loops(block)
assert sch.get(i).loop_var.name == "i"
assert sch.get(j).loop_var.name == "j"
assert sch.get(k).loop_var.name == "k"
def test_tir_schedule_copy_1(use_block_name):
# Tests:
# - Schedule.copy
sch_1 = tir.Schedule(matmul, debug_mask="all")
block_rv = sch_1.get_block(name="update")
i, j, k = sch_1.get_loops(block="update" if use_block_name else block_rv)
assert sch_1.get(i).loop_var.name == "i"
assert sch_1.get(j).loop_var.name == "j"
assert sch_1.get(k).loop_var.name == "k"
sch_2 = sch_1.copy()
assert sch_2.get(block_rv).name_hint == "update"
assert sch_2.get(i).loop_var.name == "i"
assert sch_2.get(j).loop_var.name == "j"
assert sch_2.get(k).loop_var.name == "k"
def test_tir_schedule_copy_2():
sch = tir.Schedule(mod=matmul, debug_mask="all")
i, j, k = sch.get_loops(sch.get_block("update"))
sch_copy = sch.copy()
assert not sch.get_sref(i).same_as(sch_copy.get_sref(i))
assert not sch.get_sref(j).same_as(sch_copy.get_sref(j))
assert not sch.get_sref(k).same_as(sch_copy.get_sref(k))
assert sch.get_sref(i).stmt.same_as(sch_copy.get_sref(i).stmt)
assert sch.get_sref(j).stmt.same_as(sch_copy.get_sref(j).stmt)
assert sch.get_sref(k).stmt.same_as(sch_copy.get_sref(k).stmt)
i_0, i_1 = sch.split(i, factors=[None, 64])
j_0, j_1 = sch_copy.split(j, factors=[None, 32])
assert sch.get_sref(i_0).stmt.extent == 2
assert sch.get_sref(i_1).stmt.extent == 64
with pytest.raises(IndexError):
sch_copy.get_sref(i_0)
with pytest.raises(IndexError):
sch_copy.get_sref(i_1)
with pytest.raises(IndexError):
sch.get_sref(j_0)
with pytest.raises(IndexError):
sch.get_sref(j_1)
assert sch_copy.get_sref(j_0).stmt.extent == 4
assert sch_copy.get_sref(j_1).stmt.extent == 32
verify_trace_roundtrip(sch, mod=matmul)
verify_trace_roundtrip(sch_copy, mod=matmul)
def test_tir_schedule_remove_rv():
# Tests:
# - Schedule.remove_rv
sch = tir.Schedule(matmul, debug_mask="all")
block_rv = sch.get_block(name="update")
assert sch.get(block_rv).name_hint == "update"
sch.remove_rv(block_rv)
with pytest.raises(IndexError):
sch.get(block_rv)
def test_get_child_blocks():
s = tir.Schedule(matmul, debug_mask="all")
init = s.get_block("init")
update = s.get_block("update")
# loop
blocks = s.get_child_blocks(s.get_loops(init)[0])
assert len(blocks) == 2
assert s.get(init) == s.get(blocks[0])
assert s.get(update) == s.get(blocks[1])
# block
root = s.get_block("root")
blocks = s.get_child_blocks(root)
assert len(blocks) == 2
assert s.get(init) == s.get(blocks[0])
assert s.get(update) == s.get(blocks[1])
def test_get_producers(use_block_name):
sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
block = "relu" if use_block_name else sch.get_block("relu")
(producer,) = sch.get_producers(block)
assert tvm.ir.structural_equal(
sch.get_sref(producer).stmt,
sch.get_sref(sch.get_block("matmul")).stmt,
)
verify_trace_roundtrip(sch, mod=matmul_relu)
def test_get_producers_multiple_buffer_dependencies(use_block_name):
sch = tir.Schedule(mod=tuple_reduction, debug_mask="all")
block = "T_add" if use_block_name else sch.get_block("T_add")
(producer,) = sch.get_producers(block)
assert tvm.ir.structural_equal(
sch.get_sref(producer).stmt,
sch.get_sref(sch.get_block("data_red_temp")).stmt,
)
def test_get_consumers(use_block_name):
sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
block = "matmul" if use_block_name else sch.get_block("matmul")
(consumer,) = sch.get_consumers(block)
assert tvm.ir.structural_equal(
sch.get_sref(consumer).stmt,
sch.get_sref(sch.get_block("relu")).stmt,
)
verify_trace_roundtrip(sch, mod=matmul_relu)
def test_get_consumers_multiple_buffer_dependencies(use_block_name):
sch = tir.Schedule(mod=tuple_reduction, debug_mask="all")
block = "data_red_temp" if use_block_name else sch.get_block("data_red_temp")
(consumer,) = sch.get_consumers(block)
assert tvm.ir.structural_equal(
sch.get_sref(consumer).stmt,
sch.get_sref(sch.get_block("T_add")).stmt,
)
def test_annotate_unannotate_loop():
sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
matmul = sch.get_block("matmul")
relu = sch.get_block("relu")
sch.annotate(sch.get_loops(matmul)[0], "test1", "aaa")
sch.annotate(sch.get_loops(matmul)[1], "test2", 612)
sch.annotate(sch.get_loops(matmul)[1], "test3", ["aa", 1])
sch.annotate(sch.get_loops(matmul)[0], "test4", {"arr": [0, 0], "key": 3})
tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann1)
verify_trace_roundtrip(sch=sch, mod=matmul_relu)
sch.unannotate(sch.get_loops(matmul)[0], "test1")
sch.unannotate(sch.get_loops(matmul)[1], "test2")
sch.unannotate(sch.get_loops(matmul)[1], "test3")
sch.unannotate(sch.get_loops(matmul)[0], "test4")
verify_trace_roundtrip(sch=sch, mod=matmul_relu)
def test_annotate_unannotate_block():
sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
matmul = sch.get_block("matmul")
relu = sch.get_block("relu")
sch.annotate(matmul, "test1", "aaa")
sch.annotate(relu, "test2", 0.22)
sch.annotate(relu, "test3", ["aa", 1])
sch.annotate(matmul, "test4", {"arr": [0, 0], "key": 3})
tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann2)
verify_trace_roundtrip(sch=sch, mod=matmul_relu)
sch.unannotate(matmul, "test1")
sch.unannotate(relu, "test2")
sch.unannotate(relu, "test3")
sch.unannotate(matmul, "test4")
verify_trace_roundtrip(sch=sch, mod=matmul_relu)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_specialize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring, missing-module-docstring
import tvm
from tvm.script import tir as T
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle, n: T.int32) -> None:
m = T.var("int32")
A = T.match_buffer(a, [m, n])
B = T.match_buffer(b, [m, n])
C = T.match_buffer(c, [m, m])
for i, j, k in T.grid(m, m, n):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_128(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_m_128(a: T.handle, b: T.handle, c: T.handle) -> None:
m = T.var("int32")
A = T.match_buffer(a, [m, 128])
B = T.match_buffer(b, [m, 128])
C = T.match_buffer(c, [m, m])
for i, j, k in T.grid(m, m, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_m_8x(a: T.handle, b: T.handle, c: T.handle) -> None:
x = T.var("int32")
m = T.var("int32")
A = T.match_buffer(a, [m, x * 8])
B = T.match_buffer(b, [m, x * 8])
C = T.match_buffer(c, [m, m])
for i, j, k in T.grid(m, m, x * 8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
m = T.var("int32")
n = T.var("int32")
A = T.match_buffer(a, (m, n), "float32")
C = T.match_buffer(c, (m, n), "float32")
B = T.alloc_buffer((m, n), "float32")
for i, j in T.grid(m, n):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(m, n):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_128_64(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 64), "float32")
C = T.match_buffer(c, (128, 64), "float32")
B = T.alloc_buffer((128, 64), "float32")
for i, j in T.grid(128, 64):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 64):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_128_n(a: T.handle, c: T.handle) -> None:
n = T.var("int32")
A = T.match_buffer(a, (128, n), "float32")
C = T.match_buffer(c, (128, n), "float32")
B = T.alloc_buffer((128, n), "float32")
for i, j in T.grid(128, n):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, n):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def mem_copy(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32, q: T.int32) -> None:
A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=q)
B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=q)
for i, j in T.grid(m, n):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
@T.prim_func
def mem_copy_16_16_8_4(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32", strides=[8, 1], elem_offset=4)
B = T.match_buffer(b, (16, 16), "float32", strides=[8, 1], elem_offset=4)
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
@T.prim_func
def mem_copy_m_n_p_n(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32) -> None:
A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=n)
B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=n)
for i, j in T.grid(m, n):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
@T.prim_func
def param_in_arith_exprs(a: T.handle, b: T.handle) -> None:
n = T.var("int32")
A = T.match_buffer(a, [n // 8, 8], "int32")
B = T.match_buffer(b, [n], "int32")
for i in range(n - 1):
with T.block():
vi = T.axis.S(n - 1, i)
B[vi] = A[vi // 8, vi % 8] + (n + 1) * 42
@T.prim_func
def param_in_arith_exprs_n_16(a: T.handle, b: T.handle) -> None:
n = T.var("int32")
A = T.match_buffer(a, [2, 8], "int32")
B = T.match_buffer(b, [16], "int32")
for i in range(15):
with T.block():
vi = T.axis.S(15, i)
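            # 714 == (16 + 1) * 42: once n is bound to 16, the (n + 1) * 42 term is constant-folded.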
B[vi] = A[vi // 8, vi % 8] + 714
def test_specialize_nothing():
func = matmul.specialize({})
    assert func.same_as(matmul)  # Pointer equality: the same function object is returned
def test_specialize_matmul():
a, _, _, n = matmul.params
# fully specialized
func = matmul.specialize({a: tvm.tir.decl_buffer((128, 128))})
tvm.ir.assert_structural_equal(func, matmul_128)
# partially specialized
func = matmul.specialize({n: 128})
tvm.ir.assert_structural_equal(func, matmul_m_128)
    # symbolically specialized
func = matmul.specialize({n: tvm.tir.Var("x", "int32") * 8})
tvm.ir.assert_structural_equal(func, matmul_m_8x)
def test_specialize_elemwise():
a, c = element_wise.params
C = element_wise.buffer_map[c]
# fully specialized
func = element_wise.specialize({a: tvm.tir.decl_buffer((128, 64))})
tvm.ir.assert_structural_equal(func, element_wise_128_64)
# partially specialized
func = element_wise.specialize({c: tvm.tir.decl_buffer((128, C.shape[1]))})
tvm.ir.assert_structural_equal(func, element_wise_128_n)
def test_specialize_mem_copy():
a, _, m, n, p, q = mem_copy.params
# fully specialized
func = mem_copy.specialize({a: tvm.tir.decl_buffer((16, 16), strides=[8, 1], elem_offset=4)})
tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4)
func = mem_copy.specialize({n: 16, m: 16, p: 8, q: 4})
tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4)
# partially specialized
func = mem_copy.specialize({q: n})
tvm.ir.assert_structural_equal(func, mem_copy_m_n_p_n)
def test_specialize_recursive_load():
# TODO(Siyuan): add recursive Load testcase, e.g. A[C[i]]
pass
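    # A hypothetical sketch of such a testcase (kept as a comment so nothing runs here):
    #
    # @T.prim_func
    # def gather(a: T.handle, c: T.handle, b: T.handle, n: T.int32) -> None:
    #     A = T.match_buffer(a, [n], "float32")
    #     C = T.match_buffer(c, [n], "int32")
    #     B = T.match_buffer(b, [n], "float32")
    #     for i in range(n):
    #         B[i] = A[C[i]]  # the index of the outer load is itself a load
    #
    # Specializing {n: 16} would then have to substitute n inside the nested load as well.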
def test_specialize_with_const_folding():
b = param_in_arith_exprs.params[1]
func = param_in_arith_exprs.specialize({b: tvm.tir.decl_buffer([16])})
tvm.ir.assert_structural_equal(func, param_in_arith_exprs_n_16)
if __name__ == "__main__":
test_specialize_nothing()
test_specialize_matmul()
test_specialize_elemwise()
test_specialize_mem_copy()
test_specialize_recursive_load()
test_specialize_with_const_folding()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_stmt_functor_ir_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_ir_transform():
ib = tvm.tir.ir_builder.create()
n = te.var("n")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, 10, name="j") as j:
x = tvm.tir.call_extern("int32", "TestA", i * 3 + j * 1)
ib.emit(tvm.tir.call_extern("int32", "TestB", x))
ib.emit(tvm.tir.call_extern("int32", "TestC", x))
body = ib.get()
builtin_call_extern = tvm.ir.Op.get("tir.call_extern")
def preorder(op):
if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestC":
return tvm.tir.const(0, "int32")
return None
def postorder(op):
assert isinstance(op, tvm.tir.Call)
if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestA":
return tvm.tir.call_extern("int32", "TestB", op.args[1] + 1)
return op
body = tvm.tir.stmt_functor.ir_transform(body, preorder, postorder, ["tir.Call"])
stmt_list = tvm.tir.stmt_list(body.body.body)
assert stmt_list[0].value.args[1].args[0].value == "TestB"
assert stmt_list[1].value.value == 0
if __name__ == "__main__":
test_ir_transform()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_structural_equal_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
import pytest
from tvm import te
from tvm.runtime import ObjectPath
def consistent_equal(x, y, map_free_vars=False):
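    # Check structural equality in both directions and require that structural_hash
    # agrees with it; raise if the relation is asymmetric or inconsistent.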
struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars)
struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars)
xhash = tvm.ir.structural_hash(x, map_free_vars)
yhash = tvm.ir.structural_hash(y, map_free_vars)
if struct_equal0 != struct_equal1:
raise ValueError(
"Non-commutative {} vs {}, sequal0={}, sequal1={}".format(
x, y, struct_equal0, struct_equal1
)
)
    # NOTE: hash collisions can happen but should be rare.
    # We confirm that no hash collision happens for our test cases.
if struct_equal0 != (xhash == yhash):
raise ValueError(
"Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format(
x, y, struct_equal0, xhash, yhash
)
)
return struct_equal0
def get_sequal_mismatch(x, y, map_free_vars=False):
mismatch_0 = tvm.ir.base.get_first_structural_mismatch(x, y, map_free_vars)
mismatch_1 = tvm.ir.base.get_first_structural_mismatch(y, x, map_free_vars)
if mismatch_0 is None and mismatch_1 is None:
return None
if (
mismatch_0 is None
or mismatch_1 is None
or mismatch_0[0] != mismatch_1[1]
or mismatch_0[1] != mismatch_1[0]
):
raise ValueError(
"Non-commutative {} vs {}, mismatch_0={}, mismatch_1={}".format(
x, y, mismatch_0, mismatch_1
)
)
return mismatch_0
def test_exprs():
x = tvm.tir.const(1, "int32")
y = tvm.tir.const(10, "int32")
vx = te.var("x")
vy = te.var("y")
vz = te.var("z")
zx = vx + vx
zy = vy + vy
assert consistent_equal(zx * zx, (vx + vx) * (vx + vx), map_free_vars=False)
# test assert trigger.
with pytest.raises(ValueError):
tvm.ir.assert_structural_equal(x, y)
assert not consistent_equal(vx, vy)
assert consistent_equal(vx, vy, map_free_vars=True)
    # corner case lhs:vx == rhs:vy, but cannot map it to itself
assert not consistent_equal(vx + vx, vy + vx, map_free_vars=True)
# corner case lhs:vx == rhs:vy, lhs:vy == rhs:vx
assert consistent_equal(vx + vy, vy + vx, map_free_vars=True)
    # corner case 2: rolling remap.
assert consistent_equal(vx + vy + vz, vy + vz + vx, map_free_vars=True)
assert not consistent_equal(vx + 1, vy + 1, map_free_vars=False)
    # Definition remap
assert consistent_equal(tvm.tir.Let(vx, 1, vx - 1), tvm.tir.Let(vy, 1, vy - 1))
# Default same address free var remap
assert consistent_equal(tvm.tir.Let(vx, 1, vx // vz), tvm.tir.Let(vy, 1, vy // vz))
assert consistent_equal(zx * zx, zx * zx)
assert consistent_equal(zx * zx, zy * zy, map_free_vars=True)
assert not consistent_equal(zx * zx, zy * zy, map_free_vars=False)
def test_prim_func():
x = te.var("x")
y = te.var("y")
# counter example of same equality
func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x + y))
func1 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(y + x))
assert not consistent_equal(func0, func1)
# new cases
b = tvm.tir.decl_buffer((x,), "float32")
stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
func0 = tvm.tir.PrimFunc([x, y, b], stmt)
# easiest way to deep copy is via save/load
func1 = tvm.ir.load_json(tvm.ir.save_json(func0))
tvm.ir.assert_structural_equal(func0, func1)
data0 = tvm.nd.array([1, 2, 3])
data1 = tvm.nd.array([1, 2, 3])
# attributes and ndarrays
func0 = func0.with_attr("data", data0)
func1 = func1.with_attr("data", data1)
# IRModules
mod0 = tvm.IRModule.from_expr(func0)
mod1 = tvm.IRModule.from_expr(func1)
tvm.ir.assert_structural_equal(mod0, mod1)
def test_prim_func_param_count_mismatch():
x = te.var("x")
y = te.var("y")
z = te.var("z")
# counter example of same equality
func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x))
func1 = tvm.tir.PrimFunc([x, y, z], tvm.tir.Evaluate(x))
lhs_path, rhs_path = get_sequal_mismatch(func0, func1)
expected_lhs_path = ObjectPath.root().attr("params").missing_array_element(2)
expected_rhs_path = ObjectPath.root().attr("params").array_index(2)
assert lhs_path == expected_lhs_path
assert rhs_path == expected_rhs_path
def test_prim_func_param_dtype_mismatch():
x = te.var("x")
y_0 = te.var("y", dtype="int32")
y_1 = te.var("z", dtype="float32")
# counter example of same equality
func0 = tvm.tir.PrimFunc([x, y_0], tvm.tir.Evaluate(x))
func1 = tvm.tir.PrimFunc([x, y_1], tvm.tir.Evaluate(x))
lhs_path, rhs_path = get_sequal_mismatch(func0, func1)
expected_path = ObjectPath.root().attr("params").array_index(1).attr("dtype")
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_prim_func_body_mismatch():
x_0 = te.var("x")
y_0 = te.var("y")
x_1 = te.var("x")
y_1 = te.var("y")
# counter example of same equality
func0 = tvm.tir.PrimFunc([x_0, y_0], tvm.tir.Evaluate(x_0 + x_0))
func1 = tvm.tir.PrimFunc([x_1, y_1], tvm.tir.Evaluate(x_1 + y_1))
lhs_path, rhs_path = get_sequal_mismatch(func0, func1)
expected_path = ObjectPath.root().attr("body").attr("value").attr("b")
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_array():
x = np.arange(10)
nx = tvm.nd.array(x)
ny = tvm.nd.array(x)
nz = tvm.nd.array(x.reshape(2, 5))
assert consistent_equal(nx, ny)
assert not consistent_equal(nx, nz)
def test_env_func():
@tvm.register_func("test.sequal.env_func")
def test(x):
return x + 1
x = tvm.ir.EnvFunc.get("test.sequal.env_func")
y = tvm.ir.EnvFunc.get("test.sequal.env_func")
assert consistent_equal(y, x)
def test_attrs():
x = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx")
y = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx")
z = tvm.ir.make_node("attrs.TestAttrs", axis=2, name="xx")
tvm.ir.assert_structural_equal(y, x)
assert not consistent_equal(y, z)
x = tvm.runtime.convert({"x": [1, 2, 3], "y": 2})
y = tvm.runtime.convert({"y": 2, "x": [1, 2, 3]})
z = tvm.runtime.convert({"y": 2, "x": [1, 2, 3, 4]})
assert consistent_equal(y, x)
assert not consistent_equal(y, z)
def test_stmt():
x = te.var("x")
y = te.var("y")
n = 128
A = te.placeholder((n, n), name="A")
B = te.placeholder((n, n), name="B")
ii = te.var("i")
jj = te.var("j")
Ab = tvm.tir.decl_buffer((n,), name="A")
n = te.var("n")
def func2():
ib = tvm.tir.ir_builder.create()
A = ib.buffer_ptr(Ab)
with ib.for_range(0, n, name="i") as i:
A[i] = A[i] + 1
with ib.for_range(0, 10, name="j") as j:
A[j] = A[j] + 2
A[j] = A[j] + 2
return ib.get()
assert consistent_equal(func2(), func2())
def test_buffer_storage_scope():
x = te.var("x", dtype="handle")
buffer_local_0 = tvm.tir.decl_buffer((10, 10), "float32", scope="local")
buffer_local_1 = tvm.tir.decl_buffer((10, 10), "float32", scope="local")
buffer_global = tvm.tir.decl_buffer((10, 10), "float32")
buffer_empty = tvm.tir.decl_buffer((10, 10), "float32", scope="")
func0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_0})
func1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_1})
func2 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_global})
func3 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_empty})
assert consistent_equal(func0, func1)
assert consistent_equal(func2, func3)
assert not consistent_equal(func0, func2)
def test_buffer_map_mismatch():
x = te.var("x")
buffer_0 = tvm.tir.decl_buffer((10, 10))
buffer_0_clone = tvm.tir.decl_buffer((10, 10))
buffer_1 = tvm.tir.decl_buffer((10, 20))
func_0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0})
func_0_clone = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0_clone})
func_1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_1})
lhs_path, rhs_path = get_sequal_mismatch(func_0, func_1)
expected_path = (
ObjectPath.root().attr("buffer_map").map_value(x).attr("shape").array_index(1).attr("value")
)
assert lhs_path == expected_path
assert rhs_path == expected_path
assert get_sequal_mismatch(func_0, func_0_clone) is None
def test_buffer_map_length_mismatch():
x = te.var("x")
y = te.var("x")
buffer_0 = tvm.tir.decl_buffer((10, 10))
buffer_1 = tvm.tir.decl_buffer((10, 20))
func_0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0})
func_1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0, y: buffer_1})
lhs_path, rhs_path = get_sequal_mismatch(func_0, func_1)
expected_lhs_path = ObjectPath.root().attr("buffer_map").missing_map_entry()
assert lhs_path == expected_lhs_path
expected_rhs_path = ObjectPath.root().attr("buffer_map").map_value(y)
assert rhs_path == expected_rhs_path
def test_buffer_load_store():
b = tvm.tir.decl_buffer((10, 10), "float32")
x = tvm.tir.BufferLoad(b, [0, 1])
y = tvm.tir.BufferLoad(b, [0, 1])
z = tvm.tir.BufferLoad(b, [1, 2])
assert consistent_equal(y, x)
assert not consistent_equal(y, z)
i = tvm.tir.Var("x", "int32")
sx = tvm.tir.BufferStore(b, 0.1, [0, i])
sy = tvm.tir.BufferStore(b, 0.1, [0, i])
sz = tvm.tir.BufferStore(b, 0.1, [1, i])
assert consistent_equal(sy, sx)
assert not consistent_equal(sy, sz)
def test_while():
x = tvm.tir.Var("x", "int32")
y = tvm.tir.Var("y", "int32")
wx = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))
wy = tvm.tir.While(y > 0, tvm.tir.Evaluate(y))
assert not consistent_equal(wx, wy)
assert consistent_equal(wx, wy, map_free_vars=True)
def test_while_condition_mismatch():
x = tvm.tir.Var("x", "int32")
w_0 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))
w_1 = tvm.tir.While(x < 0, tvm.tir.Evaluate(x))
lhs_path, rhs_path = get_sequal_mismatch(w_0, w_1)
expected_path = ObjectPath.root().attr("condition")
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_while_body_mismatch():
x = tvm.tir.Var("x", "int32")
w_0 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))
w_1 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x + 1))
lhs_path, rhs_path = get_sequal_mismatch(w_0, w_1)
expected_path = ObjectPath.root().attr("body").attr("value")
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_seq_mismatch():
x = tvm.tir.Var("x", "int32")
seq_0 = tvm.tir.SeqStmt(
[
tvm.tir.Evaluate(x),
tvm.tir.Evaluate(x + 1),
tvm.tir.Evaluate(x + 2),
tvm.tir.Evaluate(x + 3),
]
)
seq_1 = tvm.tir.SeqStmt(
[
tvm.tir.Evaluate(x),
tvm.tir.Evaluate(x + 1),
tvm.tir.Evaluate(x + 99),
tvm.tir.Evaluate(x + 3),
]
)
lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1)
expected_path = (
ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value")
)
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_seq_mismatch_different_lengths():
# Make sure we report a difference inside the array first, rather than the difference in length
x = tvm.tir.Var("x", "int32")
seq_0 = tvm.tir.SeqStmt(
[
tvm.tir.Evaluate(x),
tvm.tir.Evaluate(x + 1),
tvm.tir.Evaluate(x + 2),
tvm.tir.Evaluate(x + 3),
]
)
seq_1 = tvm.tir.SeqStmt([tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 3)])
lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1)
expected_path = (
ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value")
)
assert lhs_path == expected_path
assert rhs_path == expected_path
def test_seq_length_mismatch():
x = tvm.tir.Var("x", "int32")
seq_0 = tvm.tir.SeqStmt(
[
tvm.tir.Evaluate(x),
tvm.tir.Evaluate(x + 1),
tvm.tir.Evaluate(x + 2),
tvm.tir.Evaluate(x + 3),
]
)
seq_1 = tvm.tir.SeqStmt([tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2)])
lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1)
expected_lhs_path = ObjectPath.root().attr("seq").array_index(3)
expected_rhs_path = ObjectPath.root().attr("seq").missing_array_element(3)
assert lhs_path == expected_lhs_path
assert rhs_path == expected_rhs_path
if __name__ == "__main__":
test_exprs()
test_prim_func()
test_attrs()
test_array()
test_env_func()
test_stmt()
test_buffer_storage_scope()
test_buffer_load_store()
test_while()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_te_extern_primfunc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import tir, te, TVMError
from tvm.script import tir as T
from tvm.arith import _ffi_api as _ffi_arith_api
from tvm.tir.schedule import _ffi_api as _ffi_schedule_api
# TODO(csullivan): Additional test cases needed:
# - PrimFunc with 1 arg, inplace update
# - PrimFunc with buffer that uses custom storage_scope
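# A hypothetical sketch of the first missing case (one arg, inplace update), left as a
# comment so that it is not picked up by the parametrized tests below:
#
# @T.prim_func
# def func_inplace(A: T.Buffer[(16,), "float32"]):
#     for i in T.serial(16):
#         with T.block():
#             A[i] = A[i] * T.float32(2)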
@T.prim_func
def func_1(A: T.Buffer[(16,), "float32"], C: T.Buffer[(1,), "float32"]):
for i in T.serial(
0,
16,
):
with T.block():
B = T.alloc_buffer((1,), dtype="float32")
with T.block():
B[0] = A[i] * T.float32(2)
with T.block():
C[0] = C[0] + A[i] + B[0] + T.float32(1)
A[i] = B[0] + T.float32(1)
def verify_func_1(module):
a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
c_np = np.zeros((1,), dtype=np.float32)
a = tvm.nd.array(a_np, device=tvm.cpu(0))
c = tvm.nd.array(c_np, device=tvm.cpu(0))
module(a, c)
tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1), c.numpy(), rtol=1e-4)
# also test in place update
tvm.testing.assert_allclose(a_np * 2 + 1, a.numpy(), rtol=1e-4)
@T.prim_func
def func_2(
C: T.Buffer[(1,), "float32"], A: T.Buffer[(16,), "float32"], D: T.Buffer[(2,), "float32"]
):
for i in T.serial(
0,
16,
):
with T.block():
B = T.alloc_buffer((1,), dtype="float32")
with T.block():
B[0] = A[i] * T.float32(2)
with T.block():
C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
A[i] = B[0] + T.float32(1) + D[1]
def verify_func_2(module):
a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
c_np = np.zeros((1,), dtype=np.float32)
a = tvm.nd.array(a_np, device=tvm.cpu(0))
d = tvm.nd.array(d_np, device=tvm.cpu(0))
c = tvm.nd.array(c_np, device=tvm.cpu(0))
module(c, a, d)
tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4)
@T.prim_func
def func_3(
C: T.Buffer[(1,), "float32"],
A: T.Buffer[(16,), "float32"],
D: T.Buffer[(2,), "float32"],
E: T.Buffer[(16,), "float32"],
F: T.Buffer[(16,), "float32"],
):
for i in T.serial(
0,
16,
):
with T.block():
B = T.alloc_buffer((1,), dtype="float32")
with T.block():
B[0] = A[i] * T.float32(2)
with T.block():
E[i] = A[i]
F[i] = E[i] + 1.0
C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
A[i] = B[0] + T.float32(1) + D[1]
def verify_func_3(module):
a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
c_np = np.zeros((1,), dtype=np.float32)
e_np = np.zeros((16,), dtype=np.float32)
f_np = np.zeros((16,), dtype=np.float32)
a = tvm.nd.array(a_np, device=tvm.cpu(0))
d = tvm.nd.array(d_np, device=tvm.cpu(0))
c = tvm.nd.array(c_np, device=tvm.cpu(0))
e = tvm.nd.array(e_np, device=tvm.cpu(0))
f = tvm.nd.array(f_np, device=tvm.cpu(0))
module(c, a, d, e, f)
tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np, e.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np + 1, f.numpy(), rtol=1e-4)
@T.prim_func
def func_4(
C: T.Buffer[(1,), "float32"],
A: T.Buffer[(16,), "float32"],
F: T.Buffer[(16,), "float32"],
D: T.Buffer[(2,), "float32"],
E: T.Buffer[(16,), "float32"],
):
for i in T.serial(
0,
16,
):
with T.block():
B = T.alloc_buffer((1,), dtype="float32")
with T.block():
B[0] = A[i] * T.float32(2)
with T.block():
E[i] = A[i]
F[i] = E[i] + 1.0
C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
A[i] = B[0] + T.float32(1) + D[1]
def verify_func_4(module):
a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
c_np = np.zeros((1,), dtype=np.float32)
e_np = np.zeros((16,), dtype=np.float32)
f_np = np.zeros((16,), dtype=np.float32)
a = tvm.nd.array(a_np, device=tvm.cpu(0))
d = tvm.nd.array(d_np, device=tvm.cpu(0))
c = tvm.nd.array(c_np, device=tvm.cpu(0))
e = tvm.nd.array(e_np, device=tvm.cpu(0))
f = tvm.nd.array(f_np, device=tvm.cpu(0))
module(c, a, f, d, e)
tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np, e.numpy(), rtol=1e-4)
tvm.testing.assert_allclose(a_np + 1, f.numpy(), rtol=1e-4)
class TestPrimFuncs:
func, verify = tvm.testing.parameters(
[func_1, verify_func_1],
[func_2, verify_func_2],
[func_3, verify_func_3],
[func_4, verify_func_4],
)
def test_primfunc_call(self, func, verify):
target = tvm.target.Target("llvm")
func = tvm.build(func, target=target)
verify(func)
def test_te_extern_call(self, func, verify):
ir_mod = tvm.IRModule.from_expr(func.with_attr("global_symbol", "main"))
prim_func = ir_mod["main"]
input_tensors = create_input_tensors_for_primfunc(prim_func)
output = te.extern_primfunc(input_tensors, prim_func)
rt_prim_func = te.create_prim_func(tensors_from_extern_op(output, prim_func))
tvm.ir.assert_structural_equal(tvm.lower(prim_func), tvm.lower(rt_prim_func))
target = tvm.target.Target("llvm")
func = tvm.build(rt_prim_func, target=target)
verify(func)
def tensors_from_extern_op(extern, func):
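    # Rebuild the TE tensor argument list in the PrimFunc's own parameter order by
    # mapping every input/output placeholder buffer back to its TE tensor.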
if isinstance(extern, list):
output_tensors = extern
else:
output_tensors = [extern]
output_buffers = []
input_buffers = []
input_tensors = []
for ext in output_tensors:
output_buffers.extend(ext.op.output_placeholders)
input_buffers.extend(ext.op.input_placeholders)
input_tensors.extend(ext.op.input_tensors)
input_binds = dict(zip(input_buffers, input_tensors))
output_binds = dict(zip(output_buffers, output_tensors))
buffer_to_tensor = {**input_binds, **output_binds}
ordered_tensors = []
for var in func.params:
buf = func.buffer_map[var]
ordered_tensors.append(buffer_to_tensor[buf])
return ordered_tensors
def create_input_tensors_for_primfunc(primfunc):
access_map = {k: tuple(v) for k, v in _ffi_arith_api.DomainTouchedAccessMap(primfunc).items()}
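    # Each buffer maps to a (read_regions, write_regions) pair: a buffer with reads is
    # treated as an input, one with writes as an output (inplace buffers are both).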
in_buffers = [buf for buf, access in access_map.items() if len(access[0])]
out_buffers = [buf for buf, access in access_map.items() if len(access[1])]
assert in_buffers, "PrimFunc has no input buffers"
assert out_buffers, "PrimFunc has no output buffers"
outputs = []
inplace = []
inputs = in_buffers
for obuf in out_buffers:
if obuf in in_buffers:
inplace.append(obuf)
else:
outputs.append(obuf)
if not outputs:
iobuf = inplace.pop()
inputs.remove(iobuf)
outputs = [iobuf]
def create_tensors(input_buffers):
tensors = []
for buf in input_buffers:
t = te.placeholder(buf.shape, dtype=buf.dtype, name=buf.name + "_placeholder")
tensors.append(t)
return tensors
return create_tensors(inputs)
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_texture_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm.ir.module import IRModule
from tvm import tir
from tvm.script import tir as T
def test_texture_scope():
@tvm.script.ir_module
class PlusOneMultTwo:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
A = T.match_buffer(a, (128, 128, 4), dtype="float32", scope="global.texture")
B = T.alloc_buffer((128, 128, 4), dtype="float32", scope="global.texture")
C = T.match_buffer(b, (128, 128, 4), dtype="float32", scope="global.texture")
for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"):
for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"):
for k in T.serial(4):
with T.block("B"):
vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k])
B[vb, vt, vk] = A[vb, vt, vk] + T.float32(1)
for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"):
for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"):
for k in T.serial(4):
with T.block("C"):
vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k])
C[vb, vt, vk] = B[vb, vt, vk] * T.float32(2)
sch = tir.Schedule(PlusOneMultTwo, debug_mask="all")
def schedule_block(block):
_, _, inner = sch.get_loops(block)
sch.vectorize(inner)
schedule_block(sch.get_block("B"))
schedule_block(sch.get_block("C"))
target = tvm.target.Target("opencl")
mod = tvm.build(sch.mod["main"], target=target)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_bf16_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import topi
from tvm import te
def lower_stmt(sche, params, passfunc):
func = tvm.driver.build_module.schedule_to_module(sche, params, "main", None)["main"]
func = passfunc()(tvm.IRModule.from_expr(func))["main"]
stmt = func.body
return stmt
def test_promote():
def runpass(op, passfunc):
a = te.placeholder((100,), dtype="bfloat16")
b = te.placeholder((100,), dtype="bfloat16")
c = te.compute((100,), lambda i: op(a[i], b[i]))
s = te.create_schedule(c.op)
return lower_stmt(s, [a, b, c], passfunc)
def get_promoted(op):
a = te.placeholder((100,), dtype="bfloat16")
b = te.placeholder((100,), dtype="bfloat16")
c = te.compute(
(100,),
lambda i: topi.cast(op(topi.cast(a[i], "float"), topi.cast(b[i], "float")), "bfloat16"),
)
s = te.create_schedule(c.op)
func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"]
return func.body
def test_promoted(op):
stmt = runpass(op, tvm.tir.transform.BF16Promote)
tvm.ir.assert_structural_equal(stmt, get_promoted(op))
test_promoted(topi.add)
test_promoted(topi.subtract)
test_promoted(topi.multiply)
test_promoted(topi.divide)
def test_eliminate():
def to32(v):
return topi.cast(v, "float")
def to16(v):
return topi.cast(v, "bfloat16")
def get_eliminated():
a = te.placeholder((100,), dtype="bfloat16")
b = te.placeholder((100,), dtype="bfloat16")
c = te.compute(
(100,),
lambda i: to16(
topi.add(
to32(
to16(
topi.add(
to32(a[i]),
to32(b[i]),
)
)
),
to32(
to16(
topi.add(
to32(a[i]),
to32(b[i]),
)
)
),
)
),
)
s = te.create_schedule(c.op)
stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16CastElimination)
return stmt
def get_target():
a = te.placeholder((100,), dtype="bfloat16")
b = te.placeholder((100,), dtype="bfloat16")
c = te.compute(
(100,),
lambda i: to16(
topi.add(
topi.add(
to32(a[i]),
to32(b[i]),
),
topi.add(
to32(a[i]),
to32(b[i]),
),
)
),
)
s = te.create_schedule(c.op)
func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"]
return func.body
tvm.ir.assert_structural_equal(get_eliminated(), get_target())
def test_legalize():
def to32(v):
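        # bf16 -> fp32: move the 16 stored bits into the high half of a uint32 and
        # reinterpret the result as float32.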
uint32_v = topi.cast(v, "uint32")
uint32_v = tvm.tir.call_intrin(
"uint32", "tir.shift_left", uint32_v, tvm.tir.const(16, "uint32")
)
return tvm.tir.call_intrin("float32", "tir.reinterpret", uint32_v)
def to16(v):
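        # fp32 -> bf16 with round-to-nearest-even: add a rounding bias derived from
        # the low half of the bit pattern, then keep the high 16 bits.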
uint32_v = tvm.tir.call_intrin("uint32", "tir.reinterpret", v)
rounding_bias = tvm.tir.call_intrin(
"uint32", "tir.shift_right", uint32_v, tvm.tir.const(16, "uint32")
)
rounding_bias = tvm.tir.call_intrin(
"uint32", "tir.bitwise_and", rounding_bias, tvm.tir.const(1, "uint32")
)
rounding_bias = rounding_bias + tvm.tir.const(0x7FFF, "uint16")
uint32_v = uint32_v + rounding_bias
uint32_v = tvm.tir.call_intrin(
"uint32", "tir.shift_right", uint32_v, tvm.tir.const(16, "uint32")
)
return topi.cast(uint32_v, "uint16")
def check(fcompute_before, fcompute_after):
a = te.placeholder((100,), dtype="bfloat16", name="A")
b = te.placeholder((100,), dtype="bfloat16", name="B")
c = te.compute((100,), fcompute_before(a, b), name="C")
s = te.create_schedule(c.op)
stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16Legalize)
a = te.placeholder((100,), dtype="uint16", name="A")
b = te.placeholder((100,), dtype="uint16", name="B")
c = te.compute((100,), fcompute_after(a, b), name="C")
s = te.create_schedule(c.op)
func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"]
tvm.ir.assert_structural_equal(stmt, func.body)
def orig1(a, b):
return lambda i: a[i] + b[i] + a[99 - i] + b[99 - i]
def after1(a, b):
return lambda i: to16(to32(a[i]) + to32(b[i]) + to32(a[99 - i]) + to32(b[99 - i]))
def orig2(a, b):
return lambda i: a[i] * b[i] + a[99 - i] * b[99 - i] + a[i]
def after2(a, b):
return lambda i: to16(
to32(a[i]) * to32(b[i]) + to32(a[99 - i]) * to32(b[99 - i]) + to32(a[i])
)
check(orig1, after1)
check(orig2, after2)
if __name__ == "__main__":
test_promote()
test_eliminate()
test_legalize()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_combine_context_call.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_for():
dev_type = te.var("dev_type")
def device_context(dev_id):
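        # Wrap the extern call in tir.tvm_thread_context so that the CombineContextCall
        # pass can lift repeated, identical context queries into a single cached handle.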
ctx = tvm.tir.call_extern("handle", "device_context", dev_type, dev_id)
return tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx])
ib = tvm.tir.ir_builder.create()
n = te.var("n")
A = ib.allocate("float32", n, name="A", scope="global")
with ib.for_range(0, n, name="i") as i:
ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(0), A.asobject().data))
with ib.for_range(0, 10, name="j") as j:
ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(1), A.asobject().data))
ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(0), A.asobject().data))
body = ib.get()
mod = tvm.IRModule({"func": tvm.tir.PrimFunc([dev_type, n], body)})
mod = tvm.tir.transform.CombineContextCall()(mod)
assert mod["func"].body.value.dtype == "handle"
assert mod["func"].body.body.value.dtype == "handle"
if __name__ == "__main__":
test_for()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_common_subexpr_elim.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import tvm
from tvm import auto_scheduler, te, topi
from tvm.ir.base import save_json
from tvm.ir.module import IRModule
from tvm.script import tir as T
# -----------------------------------------------------
# Basic test for the expected behavior of the CSE pass
# -----------------------------------------------------
# A test program which gives the opportunity for the CSE pass to introduce two new variables,
# at two different levels
def test_cse():
z1 = te.var("z1")
z2 = te.var("z2")
z3 = te.var("z3")
i1 = te.var("i1")
i2 = te.var("i2")
x = te.var("x")
y = te.var("y")
a = te.var("a")
b = te.var("b")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let z1=1 in let z2=2 in
# Mem[i1] = z1+z2;
# let x = 1 in let y = 1 in
# let a = (x+y) + (z1+z2) in
# let b = (x+y) + z3 in
# Mem[i2] = a+b;
body = tvm.tir.LetStmt(
z1,
1,
tvm.tir.LetStmt(
z2,
2,
tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, z1 + z2, [i1]),
tvm.tir.LetStmt(
x,
1,
tvm.tir.LetStmt(
y,
1,
tvm.tir.LetStmt(
a,
(x + y) + (z1 + z2),
tvm.tir.LetStmt(
b, (x + y) + z3, tvm.tir.BufferStore(buffer, a + b, [i2])
),
),
),
),
]
),
),
)
# This test program gives the opportunity to introduce two new variables, at two different
# levels and to perform replacements in the value of "a" and "b", using these new variables.
    # We check all of that below, and more, also making sure that nothing else has changed.
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, z3], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert body.var.name == "z1"
assert body.value == 1
body = body.body
assert body.var.name == "z2"
assert body.value == 2
    # This is the let-in for the first generated variable, cse_var_1
assert isinstance(body.body, tvm.tir.LetStmt)
body = body.body
# And this is the name and value of this variable
cse_var_1 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_1"
assert tvm.ir.structural_equal(body.value, z1 + z2)
assert isinstance(body.body, tvm.tir.SeqStmt)
body = body.body
assert isinstance(body[0], tvm.tir.BufferStore)
assert isinstance(body[1], tvm.tir.LetStmt)
body = body[1]
assert body.var.name == "x"
assert body.value == 1
body = body.body
assert body.var.name == "y"
assert body.value == 1
    # This is the let-in for the second generated variable, cse_var_2
assert isinstance(body.body, tvm.tir.LetStmt)
body = body.body
# And this is the name and value of this variable
cse_var_2 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_2"
assert tvm.ir.structural_equal(body.value, x + y)
body = body.body
    assert body.var.name == "a"
# Check that the replacement has been done correctly!
assert tvm.ir.structural_equal(body.value, cse_var_2 + cse_var_1)
body = body.body
    assert body.var.name == "b"
# Check that the replacement has been done correctly!
assert tvm.ir.structural_equal(body.value, cse_var_2 + z3)
assert isinstance(body.body, tvm.tir.BufferStore)
# -----------------------------------------------------
# Tests related to If nodes
# -----------------------------------------------------
# First specific test for If nodes: some duplicated computations appear in only one branch
# (here the Then branch), not in both branches.
# In this case, the CSE pass should introduce the common computation at the top of the Then
# branch, not before the whole If (otherwise the value would be computed needlessly whenever
# the Else branch executes).
def test_cse_ifNode_1():
b = te.var("b")
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let b=1 in
# if(b) {
# Mem[i1] = y+z
# Mem[i2] = y+z
# }
# else {
# Mem[i3] = y
# }
body = tvm.tir.LetStmt(
b,
1,
tvm.tir.IfThenElse(
b,
tvm.tir.SeqStmt(
[tvm.tir.BufferStore(buffer, y + z, [i1]), tvm.tir.BufferStore(buffer, y + z, [i2])]
),
tvm.tir.BufferStore(buffer, y, [i3]),
),
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert body.var.name == "b"
assert body.value == 1
assert isinstance(body.body, tvm.tir.IfThenElse)
body = body.body
assert isinstance(body.then_case, tvm.tir.LetStmt)
body = body.then_case
# The let-in introduced by the CSE should appear now, inside the Then branch of the If node
assert body.var.name == "cse_var_1"
# and it should contain the expression (y+z) that was redundant
assert tvm.ir.structural_equal(body.value, y + z)
# Second test for If nodes: some duplicated computations appear in both the Then and Else branches.
# In this case, the CSE pass should introduce the common computation before the whole If node,
# because it is going to be computed regardless of the execution path.
def test_cse_ifNode_2():
b = te.var("b")
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let b=1 in
# if(b) {
# Mem[i1] = y+z
# Mem[i2] = y
# }
# else {
# Mem[i3] = y+z
# }
body = tvm.tir.LetStmt(
b,
1,
tvm.tir.IfThenElse(
b,
tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, y + z, [i1]), # (y+z) is present in Then branch
tvm.tir.BufferStore(buffer, y, [i2]),
]
),
tvm.tir.BufferStore(buffer, y + z, [i3]), # and also present in the Else branch
),
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert isinstance(body, tvm.tir.LetStmt)
# The let-in introduced by the CSE should appear now, at the toplevel (i.e. before the If)
assert body.var.name == "cse_var_1"
# and it should contain the expression (y+z) that was redundant
assert tvm.ir.structural_equal(body.value, y + z)
# -------------------------------------------------------------------------------------------------
# Test commoning in cascade: after the large expression ((x+y)+z) is introduced as a new
# variable, another commoning becomes possible for (x+y), which appears both in the new
# variable's definition and in the rest of the program.
# -------------------------------------------------------------------------------------------------
def test_cse_cascade():
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
x = te.var("x")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# Mem[i1] = (x+y)+z;
# Mem[i2] = (x+y)+z;
# Mem[i3] = x+y
body = tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, (x + y) + z, [i1]),
tvm.tir.BufferStore(buffer, (x + y) + z, [i2]),
tvm.tir.BufferStore(buffer, (x + y), [i3]),
]
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, x, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert isinstance(body, tvm.tir.LetStmt)
    # The let-in introduced second by the CSE pass should appear first in the output
cse_var_2 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_2"
# and it should contain the expression (x+y)
assert tvm.ir.structural_equal(body.value, (x + y))
body = body.body
assert isinstance(body, tvm.tir.LetStmt)
    # The let-in introduced first by the CSE pass should appear now, after the second one
cse_var_1 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_1"
# and it should contain the expression cse_var_2+z
assert tvm.ir.structural_equal(body.value, cse_var_2 + z)
body = body.body
assert isinstance(body, tvm.tir.SeqStmt)
assert isinstance(body[0], tvm.tir.BufferStore)
assert isinstance(body[1], tvm.tir.BufferStore)
assert isinstance(body[2], tvm.tir.BufferStore)
store1 = body[0]
store2 = body[1]
store3 = body[2]
assert tvm.ir.structural_equal(store1.value, cse_var_1)
assert tvm.ir.structural_equal(store2.value, cse_var_1)
assert tvm.ir.structural_equal(store3.value, cse_var_2)
# -----------------------------------------------------------------------------------------
# A test which ensures that we don't perform normalizations outside of introduced variables
# -----------------------------------------------------------------------------------------
def test_no_normalization_without_commoning():
x = te.var("x")
y = te.var("y")
z = te.var("z")
a = te.var("a")
# Test prog :
# let a = x + (y + z) in a
body = tvm.tir.LetStmt(a, x + (y + z), tvm.tir.Evaluate(a))
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert body.var.name == "a"
assert tvm.ir.structural_equal(body.value, x + (y + z))
# -------------------------------------------------
# Part for testing the commoning with equivalences
# -------------------------------------------------
@T.prim_func
def func_distributivity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None:
B = T.buffer_decl((50,), "int32")
B[i1] = x * (y + z)
B[i2] = x * y + x * z
@T.prim_func
def func_distributivity_expected(
i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32
) -> None:
B = T.buffer_decl((50,), "int32")
cse_var_1 = T.var("int32")
with T.let(cse_var_1, x * y + x * z):
B[i1] = cse_var_1
B[i2] = cse_var_1
@T.prim_func
def func_associativity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None:
B = T.buffer_decl((50,), "int32")
B[i1] = (x + y) + z
B[i2] = x + (y + z)
@T.prim_func
def func_associativity_expected(
i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32
) -> None:
B = T.buffer_decl((50,), "int32")
cse_var_1 = T.var("int32")
with T.let(cse_var_1, (x + y) + z):
B[i1] = cse_var_1
B[i2] = cse_var_1
def _check(original, transformed):
func = original
mod = tvm.IRModule.from_expr(func)
body = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod)
tvm.transform.PrintIR()(body)
tvm.ir.assert_structural_equal(body["main"], transformed)
def test_semantic_equiv_distributivity():
_check(func_distributivity, func_distributivity_expected)
def test_semantic_equiv_associativity():
_check(func_associativity, func_associativity_expected)
# -----------------------------------------------------
# Tests that verify the determinism of the pass
# -----------------------------------------------------
def test_deterministic_cse():
"""Test deterministic allocation of CSE vars
We expect something like
result = (x + 1) + (x + 2) + (x + 3) + (x + 1) + (x + 2) + (x + 3)
-->
cse_var_3 = (x + 1)
cse_var_2 = (x + 2)
cse_var_1 = (x + 3)
result = cse_var_3 + cse_var_2 + cse_var_1 + cse_var_3 + cse_var_2 + cse_var_1
"""
NUM_TERMS = 10
REPEATS = 10
x = te.var("x")
result = te.var("result")
offsets = sorted([i + 1 for i in range(NUM_TERMS)])
inc1 = [(x + offsets[i]) for i in range(NUM_TERMS)]
inc2 = [(x + offsets[i]) for i in range(NUM_TERMS)]
expression = x
for add in inc1 + inc2:
expression = expression + add
let_stmt = tvm.tir.LetStmt(result, expression, tvm.tir.Evaluate(result))
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x], let_stmt))
initial_hash = None
for _ in range(REPEATS):
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
body = body["main"]
# Hash and ensure serialize json is the same every time
json_val = save_json(body)
json_hash = hashlib.sha256(json_val.encode()).hexdigest()
if initial_hash is None:
initial_hash = json_hash
assert json_hash == initial_hash
# Needed for the second test on determinism
LOG_LINE = '{"i": [["[\\"conv2d_layer\\", 1, 7, 7, 512, 512, 3, 3, [1, 1], [1, 1]]", \
"llvm -keys=cpu -mcpu=broadwell -num-cores=2", \
[8, 64, 64, 0, 0, 0, 0, 0], "", 1, []], [[], [["CI", 5], \
["SP", 3, 0, 1, [1, 1, 1], 1], ["SP", 3, 4, 512, [1, 32, 16], 1], \
["SP", 3, 8, 7, [7, 1, 1], 1], ["SP", 3, 12, 7, [1, 1, 1], 1], \
["SP", 3, 16, 512, [1], 1], ["SP", 3, 18, 3, [1], 1], ["SP", 3, 20, 3, [3], 1], \
["RE", 3, [0, 4, 8, 12, 1, 5, 9, 13, 16, 18, 20, 2, 6, 10, 14, 17, 19, 21, 3, 7, \
11, 15]], ["FSP", 6, 0, 1, 2], ["FSP", 6, 3, 2, 2], ["FSP", 6, 6, 3, 2], \
["FSP", 6, 9, 4, 2], ["RE", 6, [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], \
["CA", 3, 6, 7], ["CA", 1, 6, 5], ["FU", 6, [0, 1, 2, 3, 4, 5]], ["AN", 6, 0, 3], \
["PR", 3, 0, "auto_unroll_max_step$512"], ["AN", 1, 3, 2], ["AN", 3, 21, 2], \
["AN", 6, 6, 2]]]], "r": [[0.0331129], 0, 0.900362, 1647464342], "v": "v0.6"}\n'
# The workload associated with the log
@auto_scheduler.register_workload
def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
data = te.placeholder((N, CI, H, W), name="data")
kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
bias = te.placeholder((1, CO, 1, 1), name="bias")
conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32")
out = topi.nn.relu(conv + bias)
return [data, kernel, bias, out]
def test_deterministic_cse_2():
inp, inr = auto_scheduler.measure_record.load_record_from_string(LOG_LINE)
inp = auto_scheduler.measure.recover_measure_input(inp, rebuild_state=True)
initial_hash = None
for _ in range(10):
sch, args = inp.task.compute_dag.apply_steps_from_state(inp.state)
ir_module = tvm.lower(sch, args)
primfunc = ir_module["main"]
json_str = save_json(primfunc)
new_hash = hashlib.sha256(json_str.encode("utf-8")).hexdigest()
        # Make sure that all the hashes are the same across repetitions
if initial_hash is None:
initial_hash = new_hash
assert new_hash == initial_hash
if __name__ == "__main__":
# Basic test:
test_cse()
# Tests related to If nodes:
test_cse_ifNode_1()
test_cse_ifNode_2()
    # Test performing commoning on the result of a previous commoning:
test_cse_cascade()
# Test that verifies that the input program itself is not being normalized by the pass:
test_no_normalization_without_commoning()
# Tests that turn on the equivalence of terms and verify the commoning with equivalences:
test_semantic_equiv_distributivity()
test_semantic_equiv_associativity()
# Tests that verify the determinism of the pass:
test_deterministic_cse()
test_deterministic_cse_2()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_compact_buffer_region.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
func = original
mod = tvm.IRModule.from_expr(func)
mod = tvm.tir.transform.CompactBufferAllocation()(mod)
mod = tvm.tir.transform.Simplify()(mod)
transformed = tvm.tir.transform.Simplify()(tvm.IRModule.from_expr(transformed))["main"]
tvm.ir.assert_structural_equal(mod["main"], transformed)
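# A hedged debugging aid (an assumption, not from the original file): printing the
# pass output directly makes the narrowed allocation shapes easy to inspect when
# the structural comparison in _check fails.
def _print_compacted(func):
    mod = tvm.tir.transform.CompactBufferAllocation()(tvm.IRModule.from_expr(func))
    print(mod["main"])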
@T.prim_func
def elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((16, 16), "float32")
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[i, j])
B[i, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i, j])
T.writes(C[i, j])
C[i, j] = B[i, j] * 2.0
@T.prim_func
def compacted_elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((1, 16), "float32")
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[0, j])
B[0, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[0, j])
T.writes(C[i, j])
C[i, j] = B[0, j] * 2.0
@T.prim_func
def unschedulable_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((16, 16), "float32")
for j in range(0, 16):
T.evaluate(T.call_extern("dummy_extern_function", B.data, dtype="int32"))
B[i, j] = A[i, j] + 1.0
for j in range(0, 16):
C[i, j] = B[i, j] * 2.0
@T.prim_func
def param_buffer_access_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (20, 20), "float32")
B = T.match_buffer(c, (20, 20), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(B[i, 0:16])
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[i, j])
B[i, j] = A[i, j] + 1.0
@T.prim_func
def shared_mem_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in T.thread_binding(0, 2, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 2, thread="vthread"):
for i2 in T.thread_binding(0, 4, thread="threadIdx.x"):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16])
T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16])
B = T.alloc_buffer((16, 16), "float32", scope="shared")
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, j])
T.writes(B[i0 * 8 + i1 * 4 + i2, j])
B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i0 * 8 + i1 * 4 + i2, j])
T.writes(C[i0 * 8 + i1 * 4 + i2, j])
C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0
@T.prim_func
def compacted_shared_mem_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in T.thread_binding(0, 2, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 2, thread="vthread"):
for i2 in T.thread_binding(0, 4, thread="threadIdx.x"):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16])
T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16])
B = T.alloc_buffer((8, 16), "float32", scope="shared")
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, j])
T.writes(B[i1 * 4 + i2, j])
B[i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i1 * 4 + i2, j])
T.writes(C[i0 * 8 + i1 * 4 + i2, j])
C[i0 * 8 + i1 * 4 + i2, j] = B[i1 * 4 + i2, j] * 2.0
@T.prim_func
def warp_mem_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in T.thread_binding(0, 2, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 2, thread="vthread"):
for i2 in T.thread_binding(0, 4, thread="threadIdx.x"):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16])
T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16])
B = T.alloc_buffer((16, 16), "float32", scope="warp")
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, j])
T.writes(B[i0 * 8 + i1 * 4 + i2, j])
B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i0 * 8 + i1 * 4 + i2, j])
T.writes(C[i0 * 8 + i1 * 4 + i2, j])
C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0
@T.prim_func
def compacted_warp_mem_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in T.thread_binding(0, 2, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 2, thread="vthread"):
for i2 in T.thread_binding(0, 4, thread="threadIdx.x"):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16])
T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16])
B = T.alloc_buffer((4, 16), "float32", scope="warp")
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 8 + i1 * 4 + i2, j])
T.writes(B[i2, j])
B[i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i2, j])
T.writes(C[i0 * 8 + i1 * 4 + i2, j])
C[i0 * 8 + i1 * 4 + i2, j] = B[i2, j] * 2.0
@T.prim_func
def symbolic_func(a: T.handle, c: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (n * 8,), "float32")
C = T.match_buffer(c, (n * 8,), "float32")
for i in range(0, n):
with T.block():
T.reads(A[i * 8 : i * 8 + 8])
T.writes(C[i * 8 : i * 8 + 8])
B = T.alloc_buffer((n * 8,), "float32")
for j in range(0, 8):
with T.block():
T.reads(A[i * 8 + j])
T.writes(B[i * 8 + j])
B[i * 8 + j] = A[i * 8 + j] + 1.0
for j in range(0, 8):
with T.block():
T.reads(B[i * 8 + j])
T.writes(C[i * 8 + j])
C[i * 8 + j] = B[i * 8 + j] * 2.0
@T.prim_func
def compacted_symbolic_func(a: T.handle, c: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (n * 8,), "float32")
C = T.match_buffer(c, (n * 8,), "float32")
for i in range(0, n):
with T.block():
T.reads(A[i * 8 : i * 8 + 8])
T.writes(C[i * 8 : i * 8 + 8])
B = T.alloc_buffer((T.min(n, 1) * 8,), "float32")
for j in range(0, 8):
with T.block():
T.reads(A[i * 8 + j])
T.writes(B[j])
B[j] = A[i * 8 + j] + 1.0
for j in range(0, 8):
with T.block():
T.reads(B[j])
T.writes(C[i * 8 + j])
C[i * 8 + j] = B[j] * 2.0
@T.prim_func
def complex_func(a: T.handle, c: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (8, 8), "float32")
C = T.match_buffer(c, (8, 8), "float32")
for i in range(0, 8):
with T.block():
T.reads(A[0, 8])
T.writes(C[0, 8])
B = T.alloc_buffer((8, 8), "float32")
for j in range(0, 4):
with T.block():
D = T.alloc_buffer((8, 8), "float32")
T.reads(A[i, j])
T.writes(B[i, j])
for k in range(4, 8):
D[k, j] = 1.0
for k in range(2, 4):
B[i, j] = A[i, j] + D[k, j]
for j in range(3, 5):
with T.block():
T.reads(B[i, j])
T.writes(C[i, j])
C[i, j] = B[i, j]
for j in range(6, 8):
with T.block():
T.reads(B[i, j])
T.writes(C[i, j])
C[i, j] = B[i, j]
@T.prim_func
def compacted_complex_func(a: T.handle, c: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (8, 8), "float32")
C = T.match_buffer(c, (8, 8), "float32")
for i in range(0, 8):
with T.block():
T.reads(A[0, 8])
T.writes(C[0, 8])
B = T.alloc_buffer((1, 8), "float32")
for j in range(0, 4):
with T.block():
D = T.alloc_buffer((6, 1), "float32")
T.reads(A[i, j])
T.writes(B[0, j])
for k in range(4, 8):
D[k - 2, 0] = 1.0
for k in range(2, 4):
B[0, j] = A[i, j] + D[k - 2, 0]
for j in range(3, 5):
with T.block():
T.reads(B[0, j])
T.writes(C[i, j])
C[i, j] = B[0, j]
for j in range(6, 8):
with T.block():
T.reads(B[0, j])
T.writes(C[i, j])
C[i, j] = B[0, j]
@T.prim_func
def match_buffer_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16))
C = T.match_buffer(c, (16, 16))
for i in range(0, 16):
with T.block():
A0 = T.match_buffer(A[i, 0:16], (16))
C0 = T.match_buffer(C[i, 0:16], (16))
B = T.alloc_buffer((16, 16))
with T.block():
B0 = T.match_buffer(B[i, 0:16], (16))
for j in range(0, 16):
with T.block():
A1 = T.match_buffer(A0[j], ())
B1 = T.match_buffer(B0[j], ())
B1[()] = A1[()] + 1.0
for j in range(0, 16):
with T.block():
C1 = T.match_buffer(C0[j], ())
B2 = T.match_buffer(B[i, j], ())
C1[()] = B2[()] * 2.0
@T.prim_func
def compacted_match_buffer_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16))
C = T.match_buffer(c, (16, 16))
for i in range(0, 16):
with T.block():
A0 = T.match_buffer(A[i, 0:16], (16))
C0 = T.match_buffer(C[i, 0:16], (16))
B = T.alloc_buffer((1, 16))
with T.block():
B0 = T.match_buffer(B[0, 0:16], (16))
for j in range(0, 16):
with T.block():
A1 = T.match_buffer(A0[j], ())
B1 = T.match_buffer(B0[j], ())
B1[()] = A1[()] + 1.0
for j in range(0, 16):
with T.block():
C1 = T.match_buffer(C0[j], ())
B2 = T.match_buffer(B[0, j], ())
C1[()] = B2[()] * 2.0
@T.prim_func
def storage_align_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((16, 16), "float32")
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[i, j])
T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]})
B[i, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[i, j])
T.writes(C[i, j])
C[i, j] = B[i, j] * 2.0
@T.prim_func
def compacted_storage_align_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((1, 16), strides=(31, 1), dtype="float32")
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[0, j])
T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]})
B[0, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[0, j])
T.writes(C[i, j])
C[i, j] = B[0, j] * 2.0
@T.prim_func
def padding_pattern_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (20, 20), "float32")
with T.block():
B = T.alloc_buffer((20, 20), dtype="float32")
for i, j in T.grid(16, 16):
with T.block():
B[i, j] = A[i, j]
for i, j in T.grid(20, 20):
with T.block():
C[i, j] = T.if_then_else(
2 <= i and i < 18 and 2 <= j and j < 18,
B[i - 2, j - 2],
0.0,
dtype="float32",
)
@T.prim_func
def compacted_padding_pattern_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16, 16], dtype="float32")
C = T.match_buffer(c, [20, 20], dtype="float32")
with T.block():
B = T.alloc_buffer([16, 16], dtype="float32")
for i, j in T.grid(16, 16):
with T.block():
B[i, j] = A[i, j]
for i, j in T.grid(20, 20):
with T.block():
C[i, j] = T.if_then_else(
2 <= i and i < 18 and 2 <= j and j < 18, B[i - 2, j - 2], 0.0, dtype="float32"
)
@T.prim_func
def padding_pattern_inlined(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for h, w in T.grid(224, 224):
with T.block("cache"):
cache[h, w] = X[h, w]
for h, w, kh, kw in T.grid(224, 224, 3, 3):
with T.block("compute"):
Y[h, w] = T.max(
Y[h, w],
T.if_then_else(
T.likely(1 <= h + kh, dtype="bool")
and T.likely(h + kh < 225, dtype="bool")
and T.likely(1 <= w + kw, dtype="bool")
and T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1],
0.0,
dtype="float32",
),
)
@T.prim_func
def compacted_padding_pattern_inlined(
X: T.Buffer[(224, 224), "float32"], Y: T.Buffer[(224, 224), "float32"]
) -> None:
cache = T.alloc_buffer([224, 224], dtype="float32")
for h, w in T.grid(224, 224):
with T.block("cache"):
cache[h, w] = X[h, w]
for h, w, kh, kw in T.grid(224, 224, 3, 3):
with T.block("compute"):
Y[h, w] = T.max(
Y[h, w],
T.if_then_else(
T.likely(1 <= h + kh, dtype="bool")
and T.likely(h + kh < 225, dtype="bool")
and T.likely(1 <= w + kw, dtype="bool")
and T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1],
0.0,
dtype="float32",
),
)
@T.prim_func
def mem_access_in_branch_func(a: T.handle) -> None:
A = T.match_buffer(a, (224, 224), "float32")
with T.block():
B1 = T.alloc_buffer((224, 224), dtype="float32")
B2 = T.alloc_buffer((224, 224), dtype="float32")
B3 = T.alloc_buffer((224, 224), dtype="float32")
B4 = T.alloc_buffer((224, 224), dtype="float32")
for i in range(0, 224):
for j in range(0, 224):
with T.block():
if i < 112 and j < 112:
B1[i, j] = A[i, j] * 2.0
else:
B2[i, j] = A[i, j] + 3.0
for i in range(0, 224):
for j in range(0, 224):
with T.block():
if i < 112 or j < 112:
B3[i, j] = A[i, j] * 2.0
else:
B4[i, j] = A[i, j] + 3.0
@T.prim_func
def compacted_mem_access_in_branch_func(a: T.handle) -> None:
A = T.match_buffer(a, [224, 224], dtype="float32")
with T.block():
B1 = T.alloc_buffer([112, 112], dtype="float32")
B2 = T.alloc_buffer([224, 224], dtype="float32")
B3 = T.alloc_buffer([224, 224], dtype="float32")
B4 = T.alloc_buffer([112, 112], dtype="float32")
for i, j in T.grid(224, 224):
with T.block():
if i < 112 and j < 112:
B1[i, j] = A[i, j] * 2.0
else:
B2[i, j] = A[i, j] + 3.0
for i, j in T.grid(224, 224):
with T.block():
if i < 112 or j < 112:
B3[i, j] = A[i, j] * 2.0
else:
B4[i - 112, j - 112] = A[i, j] + 3.0
@T.prim_func
def opaque_access_annotated_func(a: T.handle) -> None:
A = T.match_buffer(a, (1024,), "float32")
with T.block():
B = T.alloc_buffer((1024,), dtype="float32")
C = T.alloc_buffer((1024,), dtype="float32")
for i in range(0, 512):
with T.block():
            # without annotations, the opaque access is assumed to cover the full region
T.reads([])
T.writes([])
T.evaluate(T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32"))
B[i] = A[i]
with T.block():
            # with annotations, the opaque access is treated as touching only the
            # annotated regions, even if they are incompatible with the actual buffer accesses.
T.reads([B[i]])
T.writes([C[i : i + 9]])
T.evaluate(T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32"))
C[i] = B[i]
@T.prim_func
def compacted_opaque_access_annotated_func(a: T.handle) -> None:
A = T.match_buffer(a, (1024,), "float32")
with T.block():
B = T.alloc_buffer((1024,), dtype="float32")
C = T.alloc_buffer((520,), dtype="float32")
for i in range(0, 512):
with T.block():
            # without annotations, the opaque access is assumed to cover the full region
T.reads([])
T.writes([])
T.evaluate(T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32"))
B[i] = A[i]
with T.block():
            # with annotations, the opaque access is treated as touching only the
            # annotated regions, even if they are incompatible with the actual buffer accesses.
T.reads([B[i]])
T.writes([C[i : i + 9]])
T.evaluate(T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32"))
C[i] = B[i]
@T.prim_func
def sparse_read_cache(
A_data: T.Buffer[(819,), "float32"],
B: T.Buffer[(128,), "float32"],
A_indptr: T.Buffer[(129,), "int32"],
A_indices: T.Buffer[(819,), "int32"],
) -> None:
for i in T.serial(128):
with T.block("rowsum_outer"):
T.reads(
A_indptr[i : i + 1],
A_data[A_indptr[i] + 0 : A_indptr[i] + (A_indptr[i + 1] - A_indptr[i])],
)
T.writes(B[i])
with T.block("rowsum_init"):
T.reads()
T.writes(B[i])
B[i] = T.float32(0)
for k in T.serial(A_indptr[i + 1] - A_indptr[i]):
with T.block():
T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i])
T.writes(B[i])
A_data_local = T.alloc_buffer([819], dtype="float32", scope="local")
with T.block("A_data_cache_read"):
T.reads(A_indptr[i], A_data[A_indptr[i] + k])
T.writes(A_data_local[A_indptr[i] + k])
A_data_local[A_indptr[i] + k] = A_data[A_indptr[i] + k]
with T.block("rowsum_inner"):
T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k])
T.writes(B[i])
B[i] = B[i] + A_data_local[A_indptr[i] + k]
@T.prim_func
def compacted_sparse_read_cache(
A_data: T.Buffer[(819,), "float32"],
B: T.Buffer[(128,), "float32"],
A_indptr: T.Buffer[(129,), "int32"],
A_indices: T.Buffer[(819,), "int32"],
) -> None:
for i in T.serial(128):
with T.block("rowsum_outer"):
T.reads(
A_indptr[i : i + 1],
A_data[A_indptr[i] + 0 : A_indptr[i] + 0 + (A_indptr[i + 1] - A_indptr[i])],
)
T.writes(B[i])
with T.block("rowsum_init"):
T.reads()
T.writes(B[i])
B[i] = T.float32(0)
for k in T.serial(A_indptr[i + 1] - A_indptr[i]):
with T.block():
T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i])
T.writes(B[i])
A_data_local = T.alloc_buffer([1], dtype="float32", scope="local")
with T.block("A_data_cache_read"):
T.reads(A_indptr[i], A_data[A_indptr[i] + k])
T.writes(A_data_local[T.min(A_indptr[i] + k, 0)])
A_data_local[T.min(A_indptr[i] + k, 0)] = A_data[A_indptr[i] + k]
with T.block("rowsum_inner"):
T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k])
T.writes(B[i])
B[i] = B[i] + A_data_local[T.min(A_indptr[i] + k, 0)]
@T.prim_func
def narrow_shape(A: T.Buffer[(10,), "float32"], B: T.Buffer[(10,), "float32"]) -> None:
B_cache = T.alloc_buffer(10, "float32")
for j in T.serial(3):
for k in T.serial(4):
with T.block("B_cache"):
T.where(j * 4 + k < 10)
B_cache[j * 4 + k] = B[j]
for i in T.serial(10):
A[i] = B_cache[i] + T.float32(1)
@T.prim_func
def compacted_narrow_shape(A: T.Buffer[(10,), "float32"], B: T.Buffer[(10,), "float32"]) -> None:
B_cache = T.alloc_buffer([10], dtype="float32")
for j, k in T.grid(3, 4):
with T.block("B_cache"):
T.where(j * 4 + k < 10)
T.reads(B[j])
T.writes(B_cache[j * 4 + k])
B_cache[j * 4 + k] = B[j]
for i in T.serial(10):
A[i] = B_cache[i] + T.float32(1)
def test_elementwise():
_check(elementwise_func, compacted_elementwise_func)
def test_unschedulable_block():
_check(unschedulable_func, unschedulable_func) # changes nothing
def test_param_access():
_check(param_buffer_access_func, param_buffer_access_func) # changes nothing
def test_shared_mem():
_check(shared_mem_func, compacted_shared_mem_func)
def test_warp_mem():
_check(warp_mem_func, compacted_warp_mem_func)
def test_symbolic():
_check(symbolic_func, compacted_symbolic_func)
def test_complex():
_check(complex_func, compacted_complex_func)
def test_match_buffer():
_check(match_buffer_func, compacted_match_buffer_func)
def test_lower_te():
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
mod = tvm.tir.transform.CompactBufferAllocation()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod) # CompactBufferAllocation should do nothing on TE
def test_storage_align():
_check(storage_align_func, compacted_storage_align_func)
def test_padding_pattern():
_check(padding_pattern_func, compacted_padding_pattern_func)
def test_padding_pattern_inlined():
_check(padding_pattern_inlined, compacted_padding_pattern_inlined)
def test_mem_access_in_branch_func():
_check(mem_access_in_branch_func, compacted_mem_access_in_branch_func)
def test_opaque_access_annotated_func():
_check(opaque_access_annotated_func, compacted_opaque_access_annotated_func)
def test_sparse_read_cache():
_check(sparse_read_cache, compacted_sparse_read_cache)
def test_narrow_shape():
_check(narrow_shape, compacted_narrow_shape)
def test_compact_with_let_binding():
@T.prim_func
def func_with_let_binding():
A = T.alloc_buffer((64, 8), "float32")
B = T.alloc_buffer((64, 8), "float32")
C = T.alloc_buffer((8, 8), "float32")
for rk in range(64):
for rii, rjj in T.grid(8, 8):
C[rii, rjj] = T.float32(0)
for riijj in T.serial(8 * 8):
rii: T.int32 = riijj // 8
rjj: T.int32 = riijj % 8
C[rii, rjj] += A[rk, rii] * B[rk, rjj]
_check(func_with_let_binding, func_with_let_binding)
@T.prim_func
def func_with_non_index_let_binding():
A = T.alloc_buffer((64), "float32")
x1 = T.call_extern("get", dtype="float16")
x2 = T.call_extern("get", dtype="float32")
x3 = T.call_extern("get", dtype="float64")
x4 = T.call_extern("get", dtype="uint8")
x5 = T.call_extern("get", dtype="int32x16")
x6 = T.call_extern("get", dtype="handle")
x7 = T.call_extern("get", dtype="")
for rk in range(64):
A[rk] = T.call_extern("load_ptr", x1, x2, x3, x4, x5, x6, x7, dtype="float32")
_check(func_with_non_index_let_binding, func_with_non_index_let_binding)
def test_compact_spatial_tiled_pad_and_pooling():
@T.prim_func
def spatial_tiled_pad_and_pooling(
X: T.Buffer[(64, 112, 112), "int32"], Y: T.Buffer[(64, 56, 56), "int32"]
) -> None:
for h_o, w_o in T.grid(14, 14):
with T.block():
X_cache = T.alloc_buffer([112, 112, 64], dtype="int32")
for ax0, ax1, ax2 in T.grid(64, 9, 9):
with T.block("cache"):
T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2)
T.reads(X[ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2])
T.writes(X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0])
X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0] = X[
ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2
]
for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64):
with T.block("compute"):
T.reads(
X_cache[(h_o * 4 + h_i) * 2 + kh - 1, (w_o * 4 + w_i) * 2 + kw - 1, c]
)
T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c])
if kh == 0 and kw == 0:
Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0
Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max(
Y[h_o * 4 + h_i, w_o * 4 + w_i, c],
T.if_then_else(
T.likely(1 <= (h_o * 4 + h_i) * 2 + kh, dtype="bool")
and T.likely((h_o * 4 + h_i) * 2 + kh < 113, dtype="bool")
and T.likely(1 <= (w_o * 4 + w_i) * 2 + kw, dtype="bool")
and T.likely((w_o * 4 + w_i) * 2 + kw < 113, dtype="bool"),
X_cache[
(h_o * 4 + h_i) * 2 + kh - 1,
(w_o * 4 + w_i) * 2 + kw - 1,
c,
],
0,
dtype="int32",
),
)
@T.prim_func
def compacted_spatial_tiled_pad_and_pooling(
X: T.Buffer[(64, 112, 112), "int32"], Y: T.Buffer[(64, 56, 56), "int32"]
) -> None:
for h_o, w_o in T.grid(14, 14):
with T.block():
T.reads(X[0:64, h_o * 8 - 1 : h_o * 8 + 8, w_o * 8 - 1 : w_o * 8 + 8])
T.writes(Y[h_o * 4 : h_o * 4 + 4, w_o * 4 : w_o * 4 + 4, 0:64])
X_cache = T.alloc_buffer([9, 9, 64], dtype="int32")
for ax0, ax1, ax2 in T.grid(64, 9, 9):
with T.block("cache"):
T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2)
T.reads(X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1])
T.writes(
X_cache[
h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1,
w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1,
ax0,
]
)
X_cache[
h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1,
w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1,
ax0,
] = X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1]
for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64):
with T.block("compute"):
T.reads(
X_cache[
h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1,
w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1,
c,
]
)
T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c])
if kh == 0 and kw == 0:
Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0
Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max(
Y[h_o * 4 + h_i, w_o * 4 + w_i, c],
T.if_then_else(
T.likely(1 <= h_o * 8 + h_i * 2 + kh, dtype="bool")
and T.likely(1 <= w_o * 8 + w_i * 2 + kw, dtype="bool"),
X_cache[
h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1,
w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1,
c,
],
0,
dtype="int32",
),
)
_check(spatial_tiled_pad_and_pooling, compacted_spatial_tiled_pad_and_pooling)
def test_complex_case_1():
"""Meta-schedule matmul case for compact shared A, B matrix"""
# fmt: off
@T.prim_func
def func(A: T.Buffer[(960, 770), "float32"], B: T.Buffer[(770, 2304), "float32"], C: T.Buffer[(960, 2304), "float32"]) -> None:
for bx in T.thread_binding(144, thread="blockIdx.x"):
for vx in T.thread_binding(2, thread="vthread.x"):
for tx_p in T.thread_binding(256, thread="threadIdx.x"):
with T.block():
for k_0 in T.serial(193):
with T.block():
A_shared = T.alloc_buffer([960, 770], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([770, 2304], dtype="float32", scope="shared")
for _u in T.serial(1):
for tx in T.thread_binding(256, thread="threadIdx.x"):
for vec in T.vectorized(3):
with T.block("A_shared"):
T.where(bx // 18 * 128 + ((_u * 256 + tx) * 3 + vec) // 4 < 960 and k_0 * 4 + ((_u * 256 + tx) * 3 + vec) % 4 < 770 and (_u * 256 + tx) * 3 + vec < 512)
A_shared[bx // 18 * 128 + (_u * 768 + tx * 3 + vec) // 4, k_0 * 4 + (_u * 768 + tx * 3 + vec) % 4] = A[bx // 18 * 128 + (_u * 768 + tx * 3 + vec) // 4, k_0 * 4 + (_u * 768 + tx * 3 + vec) % 4]
for _u in T.serial(1):
for tx in T.thread_binding(256, thread="threadIdx.x"):
for vec in T.vectorized(4):
with T.block("B_shared"):
T.where(k_0 * 4 + ((_u * 256 + tx) * 4 + vec) // 128 < 770 and (_u * 256 + tx) * 4 + vec < 512)
B_shared[k_0 * 4 + (_u * 1024 + tx * 4 + vec) // 128, bx % 18 * 128 + (_u * 1024 + tx * 4 + vec) % 128] = B[k_0 * 4 + (_u * 1024 + tx * 4 + vec) // 128, bx % 18 * 128 + (_u * 1024 + tx * 4 + vec) % 128]
for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2):
with T.block("update_update"):
C[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4] = C[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4] + A_shared[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, (k_0 + k_1) * 4 + k_2] * B_shared[(k_0 + k_1) * 4 + k_2, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4]
@T.prim_func
def compacted_func(A: T.Buffer[(960, 770), "float32"], B: T.Buffer[(770, 2304), "float32"], C: T.Buffer[(960, 2304), "float32"]) -> None:
for bx in T.thread_binding(144, thread="blockIdx.x"):
for vx in T.thread_binding(2, thread="vthread.x"):
for tx_p in T.thread_binding(256, thread="threadIdx.x"):
with T.block():
for k_0 in T.serial(193):
with T.block():
A_shared = T.alloc_buffer([128, 4], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([4, 128], dtype="float32", scope="shared")
for v_u in T.serial(1):
for tx in T.thread_binding(256, thread="threadIdx.x"):
for vec in T.vectorized(3):
with T.block("A_shared"):
T.where(bx // 18 * 128 + (tx * 3 + vec) // 4 < 960 and k_0 * 4 + (tx * 3 + vec) % 4 < 770 and tx * 3 + vec < 512)
A_shared[(tx * 3 + vec) // 4, (tx * 3 + vec) % 4] = A[bx // 18 * 128 + (tx * 3 + vec) // 4, k_0 * 4 + (tx * 3 + vec) % 4]
for v_u in T.serial(1):
for tx in T.thread_binding(256, thread="threadIdx.x"):
for vec in T.vectorized(4):
with T.block("B_shared"):
T.where(k_0 * 4 + tx // 32 < 770 and tx * 4 + vec < 512)
B_shared[tx // 32, tx % 32 * 4 + vec] = B[k_0 * 4 + tx // 32, bx % 18 * 128 + tx % 32 * 4 + vec]
for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2):
with T.block("update_update"):
C[bx // 18 * 128 + tx_p // 32 * 16 + i_3 * 2 + i_4, bx % 18 * 128 + vx * 64 + tx_p % 32 * 2 + j_4] = C[bx // 18 * 128 + tx_p // 32 * 16 + i_3 * 2 + i_4, bx % 18 * 128 + vx * 64 + tx_p % 32 * 2 + j_4] + A_shared[tx_p // 32 * 16 + i_3 * 2 + i_4, k_2] * B_shared[k_2, vx * 64 + tx_p % 32 * 2 + j_4]
# fmt: on
_check(func, compacted_func)
def test_compact_dependent_buffer_indices():
"""Check the upper bound on different indices could be independently estimated."""
@T.prim_func
def diagonal_access():
for i in range(8):
with T.block():
A = T.alloc_buffer((256, 256), "float32")
for j, k in T.grid(8, 8):
with T.block():
T.where(j * 8 + k < 60)
A[i * 64 + j * 8 + k, i * 64 + j * 8 + k] = 1.0
@T.prim_func
def diagonal_access_compacted() -> None:
for i in T.serial(8):
with T.block():
A = T.alloc_buffer([60, 60], dtype="float32")
for j, k in T.grid(8, 8):
with T.block():
T.where(j * 8 + k < 60)
A[j * 8 + k, j * 8 + k] = 1.0
_check(diagonal_access, diagonal_access_compacted)
def test_compact_dependent_buffer_indices_of_packed_matmul():
"""Check the outer dimension of the packed M-dim should be compacted to 1 wrt split condition."""
@T.prim_func
def nonuniform_packed_matmul_write_cache(
A: T.Buffer[(1020, 64), "float32"],
B: T.Buffer[(1000, 64), "float32"],
C: T.Buffer[(1020, 1000), "float32"],
):
for i0, i1 in T.grid(4, 1):
with T.block():
C_local2 = T.alloc_buffer([4, 1, 16, 1000, 16], dtype="float32", scope="local")
C_local1 = T.alloc_buffer([1020, 1000], dtype="float32", scope="local")
for ax0, ax1, ax2 in T.grid(255, 1000, 64):
with T.block("matmul"):
if ax2 == 0:
C_local1[i0 * 255 + ax0, ax1] = 0
C_local1[i0 * 255 + ax0, ax1] = (
C_local1[i0 * 255 + ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2]
)
for ax0, ax1 in T.grid(255, 1000):
with T.block("st1"):
C_local2[
(i0 * 255 + ax0) // 255,
0,
(i0 * 255 + ax0) % 255 // 16,
ax1,
(i0 * 255 + ax0) % 255 % 16,
] = C_local1[i0 * 255 + ax0, ax1]
for ax0, ax1, ax2 in T.grid(16, 16, 1000):
with T.block("st2"):
T.where(ax0 * 16 + ax1 < 255)
C[i0 * 255 + (ax0 * 16 + ax1), i1 * 1000 + ax2] = C_local2[
(i0 * 255 + ax0 * 16 + ax1) // 255,
0,
(i0 * 255 + ax0 * 16 + ax1) % 255 // 16,
i1 * 1000 + ax2,
(i0 * 255 + ax0 * 16 + ax1) % 255 % 16,
]
@T.prim_func
def nonuniform_packed_matmul_write_cache_compacted(
A: T.Buffer[(1020, 64), "float32"],
B: T.Buffer[(1000, 64), "float32"],
C: T.Buffer[(1020, 1000), "float32"],
) -> None:
for i0, i1 in T.grid(4, 1):
with T.block():
C_local2 = T.alloc_buffer([1, 1, 15, 1000, 16], dtype="float32", scope="local")
C_local1 = T.alloc_buffer([255, 1000], dtype="float32", scope="local")
for ax0, ax1, ax2 in T.grid(255, 1000, 64):
with T.block("matmul"):
if ax2 == 0:
C_local1[ax0, ax1] = 0
C_local1[ax0, ax1] = (
C_local1[ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2]
)
for ax0, ax1 in T.grid(255, 1000):
with T.block("st1"):
C_local2[0, 0, ax0 // 16, ax1, ax0 % 16] = C_local1[ax0, ax1]
for ax0, ax1, ax2 in T.grid(16, 16, 1000):
with T.block("st2"):
T.where(ax0 * 16 + ax1 < 255)
C[i0 * 255 + ax0 * 16 + ax1, ax2] = C_local2[
(ax0 * 16 + ax1) // 255,
0,
(ax0 * 16 + ax1) % 255 // 16,
ax2,
(ax0 * 16 + ax1) % 255 % 16,
]
_check(nonuniform_packed_matmul_write_cache, nonuniform_packed_matmul_write_cache_compacted)
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_convert_blocks_to_opaque.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir, te
from tvm.script import tir as T
def _check(original, transformed):
func = original
mod = tvm.IRModule.from_expr(func)
mod = tvm.tir.transform.ConvertBlocksToOpaque()(mod)
mod = tvm.tir.transform.Simplify()(mod)
tvm.ir.assert_structural_equal(mod["main"], transformed)
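# ConvertBlocksToOpaque substitutes each block iter var with the value bound to
# it (vi -> i, vj -> j in the pair below) and drops the block-var declarations,
# turning the blocks into opaque blocks with explicit read/write regions.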
@T.prim_func
def elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer((16, 16), "float32")
for j in range(0, 16):
with T.block():
vi = T.axis.S(16, i)
vj = T.axis.S(16, j)
B[vi, vj] = A[vi, vj] + 1.0
for j in range(0, 16):
with T.block():
vi = T.axis.S(16, i)
vj = T.axis.S(16, j)
C[vi, vj] = B[vi, vj] * 2.0
@T.prim_func
def substituted_elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer([16, 16], "float32")
for j in range(0, 16):
with T.block():
T.reads([A[i, j]])
T.writes([B[i, j]])
B[i, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads([B[i, j]])
T.writes([C[i, j]])
C[i, j] = B[i, j] * 2.0
def test_elementwise():
_check(elementwise_func, substituted_elementwise_func)
def test_lower_te():
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
mod = tvm.tir.transform.ConvertBlocksToOpaque()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod) # ConvertBlocksToOpaque should do nothing on TE
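# tvm.testing.CompareBeforeAfter applies `transform` to `before` and compares the
# result against `expected`; when `expected` is an exception class, as below, the
# harness asserts that the transformation raises it instead of producing output.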
class TestErrorIfPredicateUsesBlockVariables(tvm.testing.CompareBeforeAfter):
transform = tvm.tir.transform.ConvertBlocksToOpaque()
def before(A: T.Buffer[8, "int32"]):
for i in T.serial(8):
with T.block():
vi = T.axis.remap("S", [i])
T.where(vi < 6)
T.evaluate(0)
expected = tvm.TVMError
if __name__ == "__main__":
tvm.testing.main()
| https://github.com/zk-ml/tachikoma |
tests/python/unittest/test_tir_transform_convert_for_loops_serial.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
# fmt: off
@T.prim_func
def fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2(placeholder_30: T.handle, placeholder_31: T.handle, placeholder_32: T.handle, T_cast_8: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", "tir.noalias": True})
placeholder_33 = T.match_buffer(placeholder_30, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_34 = T.match_buffer(placeholder_31, [3072], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_35 = T.match_buffer(placeholder_32, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_9 = T.match_buffer(T_cast_8, [12544], dtype="int16", elem_offset=0, align=64, offset_factor=1)
# body
PaddedInput_3 = T.decl_buffer([150528], "int16")
for i0_i1_fused_3 in T.parallel(0, 28):
for i2_3, i3_3 in T.grid(28, 192):
PaddedInput_3[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3) ] = placeholder_33[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)]
for ax0_ax1_fused_ax2_fused_3 in T.parallel(0, 784):
for ax3_2 in T.serial(0, 16):
Conv2dOutput_3 = T.decl_buffer([1], "int32")
Conv2dOutput_3[0] = 0
for rc_3 in T.serial(0, 192):
Conv2dOutput_3[0] = (Conv2dOutput_3[0] + (T.cast(PaddedInput_3[((ax0_ax1_fused_ax2_fused_3*192) + rc_3)], "int32")*T.cast(placeholder_34[((rc_3*16) + ax3_2)], "int32")))
T_cast_9[((ax0_ax1_fused_ax2_fused_3*16) + ax3_2)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_3[0] + placeholder_35[ax3_2]), 1764006585, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
# fmt: on
def test_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2():
primfunc = fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2
mod = tvm.IRModule.from_expr(primfunc)
mod = tvm.tir.transform.ConvertForLoopsToSerial()(mod)
def verify_serial_loops(stmt):
if isinstance(stmt, tvm.tir.For):
assert stmt.kind == tvm.tir.ForKind.SERIAL
for _, primfunc in mod.functions.items():
stmt_functor.post_order_visit(primfunc.body, verify_serial_loops)
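# A minimal, self-contained sketch (not in the original file) of the same
# conversion applied to a trivial loop nest; the names are illustrative only.
@T.prim_func
def _parallel_copy(A: T.Buffer[16, "float32"], B: T.Buffer[16, "float32"]) -> None:
    for i in T.parallel(16):
        B[i] = A[i]
def _demo_convert_for_loops_to_serial():
    mod = tvm.tir.transform.ConvertForLoopsToSerial()(tvm.IRModule.from_expr(_parallel_copy))
    print(mod["main"])  # the parallel loop above is rewritten into a serial one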
if __name__ == "__main__":
pytest.main([__file__])
| https://github.com/zk-ml/tachikoma |