Genghan commited on
Commit
1bb3ed7
·
verified ·
1 Parent(s): 1e17612

Add dataset card and NKIBench contents

Browse files
Files changed (45) hide show
  1. README.md +151 -3
  2. kernel_wrapper.py +233 -0
  3. kernels/adamw_M10944_N2048_0.py +66 -0
  4. kernels/add_rmsnorm_matmul_M4096_N2048_K1024_0.py +80 -0
  5. kernels/bmm_B16_M4096_K64_N4096_0.py +47 -0
  6. kernels/bmm_softmax_B16_K64_M4096_N4096_0.py +75 -0
  7. kernels/gqa_full_B1_N4096_QH16_KH8_D128_0.py +115 -0
  8. kernels/lora_M4096_N12288_K5120_R128_0.py +123 -0
  9. kernels/mamba_M7168_C256_S16_0.py +68 -0
  10. kernels/matmul_M4096_N12288_K5120_0.py +53 -0
  11. kernels/matmul_add_rmsnorm_M4096_N2048_K2048_0.py +48 -0
  12. kernels/rmsnorm_matmul_M4096_N2048_K1024_0.py +75 -0
  13. kernels/rope_single_freq_apply_B1_H64_N4096_D128_0.py +71 -0
  14. kernels/silu_M4096_N7168_0.py +38 -0
  15. kernels/swiglu_M4096_N3072_K1024_0.py +113 -0
  16. kernels/transpose_matmul_M4096_K2048_N10944_0.py +42 -0
  17. reference/adamw_M10944_N2048_numpy_1.py +49 -0
  18. reference/add_rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py +48 -0
  19. reference/bmm_B16_M4096_K64_N4096_numpy_1.py +42 -0
  20. reference/bmm_softmax_B16_K64_M4096_N4096_numpy_1.py +46 -0
  21. reference/gqa_full_B1_N4096_QH16_KH8_D128_numpy_2.py +58 -0
  22. reference/lora_M4096_N12288_K5120_R128_numpy_1.py +48 -0
  23. reference/mamba_M7168_C256_S16_numpy_1.py +44 -0
  24. reference/matmul_M4096_N12288_K5120_numpy_2.py +41 -0
  25. reference/matmul_add_rmsnorm_M4096_N2048_K2048_numpy_1.py +42 -0
  26. reference/rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py +54 -0
  27. reference/rope_single_freq_apply_B1_H64_N4096_D128_numpy_1.py +27 -0
  28. reference/silu_M4096_N7168_numpy_0.py +38 -0
  29. reference/swiglu_M4096_N3072_K1024_numpy_2.py +48 -0
  30. reference/transpose_matmul_M4096_K2048_N10944_numpy_1.py +42 -0
  31. seeds/adamw.yaml +24 -0
  32. seeds/add_rmsnorm_matmul.yaml +25 -0
  33. seeds/bmm.yaml +16 -0
  34. seeds/bmm_softmax.yaml +20 -0
  35. seeds/gqa_full.yaml +32 -0
  36. seeds/lora.yaml +20 -0
  37. seeds/mamba.yaml +25 -0
  38. seeds/matmul.yaml +15 -0
  39. seeds/matmul_add_rmsnorm.yaml +20 -0
  40. seeds/rmsnorm_matmul.yaml +30 -0
  41. seeds/rope_single_freq_apply.yaml +23 -0
  42. seeds/silu.yaml +13 -0
  43. seeds/swiglu.yaml +20 -0
  44. seeds/transpose_matmul.yaml +15 -0
  45. summary.json +258 -0
README.md CHANGED
@@ -1,3 +1,151 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ language:
4
+ - en
5
+ tags:
6
+ - nki
7
+ - aws-neuron
8
+ - trainium
9
+ - inferentia
10
+ - kernel
11
+ - gpu-kernel
12
+ - benchmark
13
+ - code
14
+ pretty_name: NKIBench
15
+ size_categories:
16
+ - n<1K
17
+ ---
18
+
19
+ # NKIBench
20
+
21
+ NKIBench is a benchmark of AWS [Neuron Kernel Interface (NKI)](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/nki/index.html) kernels paired with NumPy reference implementations. Each task provides a specification, a ground-truth NumPy forward pass, and an optimized NKI kernel targeting AWS Trainium / Inferentia devices, together with tooling to compile, check numerical correctness, and measure on-device latency.
22
+
23
+ ## Dataset structure
24
+
25
+ ```
26
+ NKIBench/
27
+ ├── seeds/ # YAML task specifications (shape-agnostic templates)
28
+ ├── reference/ # NumPy reference implementations with concrete shapes
29
+ ├── kernels/ # Optimized NKI kernels (one per case)
30
+ ├── summary.json # Index mapping task → case → {seed, reference, kernel}
31
+ └── kernel_wrapper.py # Profiler: compile, correctness check, latency benchmark
32
+ ```
33
+
34
+ ### `summary.json`
35
+
36
+ The canonical index. Each entry maps a task name to one or more parameter cases and the files that implement them:
37
+
38
+ ```json
39
+ {
40
+ "matmul": {
41
+ "seed": "./seeds/matmul.yaml",
42
+ "cases": {
43
+ "3": {
44
+ "values": {"K": 5120, "M": 4096, "N": 12288},
45
+ "impls": [{
46
+ "task": "./reference/matmul_M4096_N12288_K5120_numpy_2.py",
47
+ "kernel": "./kernels/matmul_M4096_N12288_K5120_0.py"
48
+ }]
49
+ }
50
+ }
51
+ }
52
+ }
53
+ ```
54
+
55
+ ### `seeds/*.yaml`
56
+
57
+ A shape-agnostic specification: the task name, its symbolic parameters, an input generator, and a NumPy `forward` implementation.
58
+
59
+ ```yaml
60
+ test_name: matmul
61
+ parameters: [M, N, K]
62
+ input: |
63
+ lhs = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
64
+ rhs = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
65
+ return [lhs, rhs]
66
+ impl: |
67
+ def forward(lhs, rhs):
68
+ return np.matmul(lhs, rhs)
69
+ ```
70
+
71
+ ### `reference/*.py`
72
+
73
+ A shape-concrete NumPy reference. Exposes:
74
+
75
+ - `get_inputs()` — produces randomized numpy input tensors.
76
+ - `forward(*inputs)` — ground-truth computation.
77
+ - `transform_to_nki_inputs(inputs)` — reshapes numpy inputs into the tile layout the NKI kernel expects.
78
+ - `transform_nki_outputs(k_res, ref)` — reshapes kernel outputs back to reference layout.
79
+
80
+ ### `kernels/*.py`
81
+
82
+ Optimized NKI kernels using `neuronxcc.nki`. Each file defines a `kernel` (or `optimized_kernel`) function decorated with `@nki.jit`, written against tiled SBUF/PSUM/HBM memory on AWS Neuron hardware.
83
+
84
+ ## Task list
85
+
86
+ | Task | Category | Parameters (one sample case) |
87
+ |---|---|---|
88
+ | `matmul` | GEMM | M=4096, N=12288, K=5120 |
89
+ | `transpose_matmul` | GEMM | M=4096, K=2048, N=10944 |
90
+ | `bmm` | Batched GEMM | B=16, M=4096, K=64, N=4096 |
91
+ | `bmm_softmax` | Fused attention block | B=16, K=64, M=4096, N=4096 |
92
+ | `rmsnorm_matmul` | Fused norm + GEMM | M=4096, N=2048, K=1024 |
93
+ | `matmul_add_rmsnorm` | Fused GEMM + residual + norm | M=4096, N=2048, K=2048 |
94
+ | `add_rmsnorm_matmul` | Fused residual + norm + GEMM | M=4096, N=2048, K=1024 |
95
+ | `swiglu` | Activation | M=4096, N=3072, K=1024 |
96
+ | `silu` | Activation | M=4096, N=7168 |
97
+ | `gqa_full` | Grouped-query attention | B=1, N=4096, QH=16, KH=8, D=128 |
98
+ | `rope_single_freq_apply` | Rotary position embedding | B=1, H=64, N=4096, D=128 |
99
+ | `lora` | LoRA low-rank update | M=4096, N=12288, K=5120, R=128 |
100
+ | `adamw` | Optimizer step | M=10944, N=2048 |
101
+ | `mamba` | Mamba SSM block | M=7168, C=256, S=16 |
102
+
103
+ ## Usage
104
+
105
+ ```bash
106
+ # Download the dataset
107
+ hf download Genghan/NKIBench --repo-type dataset --local-dir NKIBench
108
+ cd NKIBench
109
+ ```
110
+
111
+ ```python
112
+ # Profile one kernel on an AWS Neuron-enabled instance (e.g. trn1 / inf2).
113
+ # Requires: neuronx-cc, neuronx-runtime, and the `neuron-profile` CLI.
114
+ import json
115
+ from kernel_wrapper import NKIKernel
116
+
117
+ summary = json.load(open("summary.json"))
118
+ case = summary["matmul"]["cases"]["3"]["impls"][0]
119
+
120
+ k = NKIKernel(program_path=case["kernel"], base_numpy_path=case["task"])
121
+ result = k.profile(save_fields=["mac_count"])
122
+
123
+ print("compiled:", result.compiled)
124
+ print("correct :", result.correct)
125
+ print("latency :", result.metadata.get("latency"), "ms")
126
+ ```
127
+
128
+ `NKIKernel.profile()` compiles the kernel, validates numerical correctness against the NumPy reference over multiple random seeds (L2-norm relative tolerance `2e-5`), and benchmarks latency via `neuron-profile`. `float16` inside a kernel is rejected to avoid silent precision loss.
129
+
130
+ ## Hardware requirements
131
+
132
+ Running the kernels requires AWS Neuron hardware (Trainium / Inferentia2). The NumPy references in `reference/` and the YAML seeds are portable and can be inspected or extended anywhere.
133
+
134
+ ## Intended uses
135
+
136
+ - Evaluating code generation / kernel synthesis systems on AWS NKI.
137
+ - Studying handwritten-vs-generated kernel performance.
138
+ - Training/fine-tuning models to produce NKI code, using the seed + reference pair as the task specification.
139
+
140
+ ## Citation
141
+
142
+ If you use NKIBench in your work, please cite the repository.
143
+
144
+ ```bibtex
145
+ @misc{nkibench,
146
+ title = {NKIBench: A Benchmark of AWS NKI Kernels},
147
+ author = {Gong, Genghan},
148
+ year = {2026},
149
+ url = {https://huggingface.co/datasets/Genghan/NKIBench}
150
+ }
151
+ ```
kernel_wrapper.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json, uuid, time, tempfile
2
+ import os
3
+ import traceback
4
+ import numpy as np
5
+ import sys
6
+ import re
7
+ from pathlib import Path
8
+ from pydantic import BaseModel, Field
9
+
class KernelProperties(BaseModel):
    """
    Result record for one kernel compile / correctness / benchmark pass.

    All fields start in the "nothing succeeded yet" state and are updated
    in place as each stage of profiling completes.
    """
    # True once the NKI kernel compiled without raising.
    compiled: bool = False
    # True once outputs matched the NumPy reference within tolerance.
    correct: bool = False
    # True once the compiled kernel executed without raising.
    runnable: bool = False
    # Free-form details: error strings/tracebacks, latency stats, profiler fields.
    metadata: dict = Field(default_factory=dict)
18
+
def load_module_from_path(file_path):
    """Dynamically import a Python module from an arbitrary file path.

    The file's parent directory is appended to ``sys.path`` so that any
    sibling imports made by the loaded file keep working.

    Args:
        file_path: Path to a ``.py`` file.

    Returns:
        The freshly executed module object.

    Raises:
        ImportError: If an import spec cannot be built for the file.
    """
    import importlib.util

    parent_dir = str(Path(file_path).parent)
    if parent_dir not in sys.path:
        sys.path.append(parent_dir)
    # Derive the module name from the file instead of the fixed name
    # "module": the previous code aliased every loaded file (task module,
    # kernel module, ...) to the same spec name.
    module_name = f"_dynload_{Path(file_path).stem}"
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Could not load module from {file_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
31
+
32
+
def l2norm_allclose(v_k, v_r, rel_tol=1e-5):
    """Return True when ``v_k`` matches ``v_r`` within a relative L2 bound.

    Both operands are promoted to float64 before the reduction so the norm
    itself does not lose precision.

    Args:
        v_k: Kernel output array (or numpy scalar).
        v_r: Reference array (or numpy scalar).
        rel_tol: Allowed ratio ``||v_k - v_r||_2 / ||v_r||_2``.
    """
    diff_norm = np.linalg.norm((v_k - v_r).astype(np.float64))
    ref_norm = np.linalg.norm(np.asarray(v_r).astype(np.float64))
    # An all-zero reference made the original bound "diff < 0", which is
    # never true even for an exact match; require an exact zero diff instead.
    if ref_norm == 0.0:
        return diff_norm == 0.0
    return diff_norm < rel_tol * ref_norm
35
+
def check_correctness_numpy(output_nki, output_task, res, rel_tol=2e-5):
    """Compare kernel outputs against reference outputs and record a verdict.

    Sets ``res.correct`` and appends human-readable failure descriptions to
    ``res.metadata["correctness_error"]`` (a list).

    Args:
        output_nki: List of kernel outputs (arrays or scalars).
        output_task: Reference output — a single array/scalar or a tuple.
        res: Result object updated in place.
        rel_tol: Relative L2-norm tolerance for numeric comparisons.
    """
    def note(msg):
        # Accumulate failure descriptions without clobbering earlier ones.
        res.metadata.setdefault("correctness_error", []).append(msg)

    if not isinstance(output_task, tuple):
        output_task_tuple = (output_task,)
    else:
        output_task_tuple = output_task

    if len(output_nki) != len(output_task_tuple):
        note(f"Num outputs mismatch: nki={len(output_nki)} vs ref={len(output_task_tuple)}")
        res.correct = False
        return

    is_correct = True
    for i, (v_k, v_r) in enumerate(zip(output_nki, output_task_tuple)):
        if hasattr(v_r, "shape") and hasattr(v_k, "shape"):
            if v_k.shape != v_r.shape:
                note(f"Output {i} shape mismatch, expected {v_r.shape}, got {v_k.shape}; ")
                is_correct = False
                # Subtracting mismatched shapes below would raise and abort
                # the whole check, so skip the value comparison for this output.
                continue
            l2norm_diff = np.linalg.norm((v_k - v_r).astype(np.float64))
            l2norm_ref = np.linalg.norm(v_r.astype(np.float64))
            # Zero reference: only an exact-zero diff counts as a match.
            if l2norm_ref == 0.0:
                close = l2norm_diff == 0.0
            else:
                close = l2norm_diff < rel_tol * l2norm_ref
            if not close:
                max_diff = np.amax(np.abs(v_k - v_r))
                avg_diff = np.mean(np.abs(v_k - v_r))
                # Guard the element-wise relative diff against zeros in the
                # reference, which otherwise emit divide-by-zero warnings.
                with np.errstate(divide="ignore", invalid="ignore"):
                    max_rel_diff = np.amax(np.abs(v_k - v_r) / np.abs(v_r))
                l2norm_rel_diff = l2norm_diff / l2norm_ref if l2norm_ref else np.inf
                note(
                    f"Output {i} value mismatch, max diff {max_diff:.6f}, "
                    f"avg diff {avg_diff:.6f}, max rel diff {max_rel_diff:.6f}, "
                    f"l2norm diff {l2norm_diff:.6f}, l2norm ref {l2norm_ref:.6f}, "
                    f"l2norm rel diff {l2norm_rel_diff:.6f}"
                )
                is_correct = False
        elif np.issubdtype(type(v_r), np.floating) or np.issubdtype(type(v_k), np.floating):
            # Scalar float comparison with the same relative criterion.
            ref = float(v_r)
            got = float(v_k)
            ok = abs(got - ref) == 0.0 if ref == 0.0 else abs(got - ref) < rel_tol * abs(ref)
            if not ok:
                note(f"Output {i} value mismatch, expected {v_r}, got {v_k};")
                is_correct = False
        else:
            # Non-float scalars (ints, bools, ...) must match exactly.
            if v_k != v_r:
                note(f"Output {i} value mismatch, expected {v_r}, got {v_k}; ")
                is_correct = False
    res.correct = is_correct
75
+
def check_precision_and_correctness(program_path, output_nki, output_task, res, rel_tol):
    """Reject low-precision kernels, then run the numeric correctness check.

    Args:
        program_path: Path to the kernel source file to scan.
        output_nki: List of kernel outputs.
        output_task: Reference output(s) from the NumPy implementation.
        res: Result object updated in place.
        rel_tol: Relative L2-norm tolerance forwarded to the value check.
    """
    with open(program_path, 'r') as f:
        program_code = f.read()
    # Strip line comments so commented-out mentions of float16 don't trip the scan.
    program_code = re.sub(r'#.*', '', program_code)
    # Substring match deliberately catches both "float16" and "bfloat16".
    if "float16" in program_code:
        # Use the same list-of-messages shape as check_correctness_numpy so
        # consumers of metadata["correctness_error"] see one consistent type.
        res.metadata.setdefault("correctness_error", []).append("Float16 is used in the program.")
        res.correct = False
        return
    check_correctness_numpy(output_nki, output_task, res, rel_tol=rel_tol)
87
+
88
+ import neuronxcc.nki as nki
89
+
def get_latency(nki_kernel_fn, nki_inputs, artifact_dir):
    """Run the kernel once under the profiler and return its latency in ms.

    Compiles and executes via ``nki.baremetal`` (saving NEFF/NTFF artifacts
    into ``artifact_dir``), then parses the ``neuron-profile`` summary JSON.

    Args:
        nki_kernel_fn: The NKI kernel callable.
        nki_inputs: Positional inputs for the kernel.
        artifact_dir: Directory receiving .neff/.ntff/.json artifacts.

    Returns:
        Device time for the run, in milliseconds.

    Raises:
        RuntimeError: If the ``neuron-profile`` invocation fails.
    """
    kernel_id = uuid.uuid4()
    neff_path = os.path.join(artifact_dir, f"neff_{kernel_id}.neff")
    ntff_path = os.path.join(artifact_dir, f"ntff_{kernel_id}.ntff")
    nki.baremetal(
        nki_kernel_fn,
        save_neff_name=neff_path,
        save_trace_name=ntff_path,
        additional_compile_opt="--disable-dge --logical-nc-config=1"
    )(*nki_inputs)
    summary_profile_path = os.path.join(artifact_dir, f"profile_{kernel_id}.json")

    summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
    # Surface profiler failures here instead of as a confusing JSON parse error.
    status = os.system(summary_profile_cmd)
    if status != 0:
        raise RuntimeError(f"neuron-profile failed (exit status {status}): {summary_profile_cmd}")
    # Close the summary file deterministically (json.load(open(...)) leaked it).
    with open(summary_profile_path, 'r') as f:
        summary = json.load(f)
    # The summary JSON has a single top-level entry; total_time is scaled by
    # 1e3 and reported as ms (i.e. presumably in seconds — confirm against
    # the neuron-profile docs).
    latency_ms = summary[next(iter(summary))]["total_time"] * 1e3
    return latency_ms
107
+
def benchmark_latency(warmpup_iterations, benchmark_iterations, nki_kernel_fn, nki_inputs, artifact_dir):
    """Warm up, then repeatedly measure kernel latency and summarize it.

    Args:
        warmpup_iterations: Number of untimed warm-up runs (name kept as-is
            for interface compatibility).
        benchmark_iterations: Number of timed profiler runs.
        nki_kernel_fn: The NKI kernel callable.
        nki_inputs: Positional inputs for the kernel.
        artifact_dir: Directory for profiler artifacts.

    Returns:
        Dict with mean/min/max latency in ms plus "rel_diffs", the
        max-vs-min relative spread used as a run-stability signal.
    """
    # Untimed warm-up runs before any measurement is taken.
    for _ in range(warmpup_iterations):
        nki.baremetal(
            nki_kernel_fn,
            additional_compile_opt="--disable-dge --logical-nc-config=1"
        )(*nki_inputs)

    # Collect one profiled latency sample per benchmark iteration.
    samples_ms = [
        get_latency(nki_kernel_fn, nki_inputs, artifact_dir)
        for _ in range(benchmark_iterations)
    ]
    fastest = np.min(samples_ms)
    slowest = np.max(samples_ms)
    return {
        "mean_ms": np.mean(samples_ms),
        "min_ms": fastest,
        "max_ms": slowest,
        "rel_diffs": (slowest - fastest) / fastest,
    }
125
+
class NKIKernel:
    """Compile, validate, and benchmark one NKI kernel against its NumPy reference."""

    def __init__(self, program_path: str, base_numpy_path: str):
        """
        Args:
            program_path: Path to the NKI kernel file (defines ``kernel`` or
                ``optimized_kernel``).
            base_numpy_path: Path to the NumPy reference file (defines
                ``forward``, ``get_inputs``, ``transform_to_nki_inputs`` and
                ``transform_nki_outputs``).
        """
        self.program_path = program_path
        self.base_numpy_path = base_numpy_path
        self.res = KernelProperties()
        # Relative L2-norm tolerance for correctness checks.
        self.rel_tol = 2e-5
        # Max allowed (max - min) / min latency spread before re-measuring.
        self.perf_tol = 0.01

    def profile(self, save_fields: "list[str] | None" = None):
        """Run the full pipeline: compile -> correctness -> latency benchmark.

        Args:
            save_fields: Optional profiler summary fields (e.g. "mac_count")
                to copy into the result metadata.

        Returns:
            KernelProperties describing how far the kernel got and why.
        """
        # None default instead of a mutable [] default argument.
        save_fields = save_fields or []
        os.environ["NEURON_CC_FLAGS"] = "--auto-cast=none"
        os.environ['NEURON_RT_NUM_CORES'] = '1'
        np.random.seed(42)
        task_module = load_module_from_path(self.base_numpy_path)
        task_fn = task_module.forward
        task_np_input_fn = task_module.get_inputs
        task_np_inputs = task_np_input_fn()
        task_nki_output_fn = task_module.transform_nki_outputs
        self.res = KernelProperties()
        new_profile_name = f"nki_{uuid.uuid4()}"

        with tempfile.TemporaryDirectory(dir="/tmp", prefix=f"{new_profile_name}_") as artifact_dir:
            neff_path = os.path.join(artifact_dir, "kernel_file.neff")
            ntff_path = os.path.join(artifact_dir, "kernel_profile.ntff")
            # --- Stage 1: load kernel module, compile, and run once ----------
            try:
                nki_kernel_module = load_module_from_path(self.program_path)
                if hasattr(nki_kernel_module, "kernel"):
                    nki_kernel_fn = nki_kernel_module.kernel
                elif hasattr(nki_kernel_module, "optimized_kernel"):
                    nki_kernel_fn = nki_kernel_module.optimized_kernel
                else:
                    raise ValueError(f"No kernel function found in {self.program_path}")
                # Get the transform_to_nki_inputs function
                if hasattr(task_module, "transform_to_nki_inputs"):
                    task_nki_input_fn = task_module.transform_to_nki_inputs
                else:
                    raise ValueError(f"No transform_to_nki_inputs function found in {self.program_path} or {self.base_numpy_path}")
                nki_inputs = task_nki_input_fn(task_np_inputs)

                output_nki = nki.baremetal(
                    nki_kernel_fn,
                    save_neff_name=neff_path,
                    save_trace_name=ntff_path,
                    additional_compile_opt="--disable-dge --logical-nc-config=1"
                )(*nki_inputs)
                self.res.compiled = True
                self.res.runnable = True

            except Exception as e:
                print(f"Compilation failure. Error: {e}")
                self.res.metadata["compilation_error"] = str(e)
                self.res.metadata["compilation_traceback"] = traceback.format_exc()
                return self.res

            # --- Stage 2: correctness over several random seeds --------------
            try:
                for rnd_seed in [0, 21, 42, 63, 84]:
                    np.random.seed(rnd_seed)
                    task_np_inputs = task_np_input_fn()
                    nki_inputs = task_nki_input_fn(task_np_inputs)
                    output_task = task_fn(*task_np_inputs)
                    output_nki_raw = nki.baremetal(
                        nki_kernel_fn,
                        additional_compile_opt="--disable-dge --logical-nc-config=1"
                    )(*nki_inputs)
                    output_nki = task_nki_output_fn(output_nki_raw, output_task)
                    check_precision_and_correctness(self.program_path, output_nki, output_task, self.res, self.rel_tol)
                    if not self.res.correct:
                        break
            except Exception as e:
                print(f"Correct checking failure. Error: {e}")
                self.res.metadata["correctness_error"] = str(e)
                return self.res

            if not self.res.correct:
                return self.res

            # --- Stage 3: latency benchmark with noise-based retries ---------
            try:
                runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                rel_diff_list = [runtime_stats["rel_diffs"]]
                runtime_stats_list = [runtime_stats]
                # If the spread across samples exceeds perf_tol, RE-RUN the
                # benchmark. The previous code appended the same stale stats
                # without re-measuring, so retries never did anything.
                # Just retry twice. In paper, we did 10 times.
                while rel_diff_list[-1] > self.perf_tol and len(rel_diff_list) <= 2:
                    print(f"Retry: {self.program_path } at {len(rel_diff_list)}; rel_diffs: {rel_diff_list}")
                    time.sleep(1)
                    runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                    rel_diff_list.append(runtime_stats["rel_diffs"])
                    runtime_stats_list.append(runtime_stats)
                # Keep the most stable (lowest-spread) measurement.
                runtime_stats = runtime_stats_list[int(np.argmin(rel_diff_list))]
                self.res.metadata["latency"] = runtime_stats["mean_ms"]
                self.res.metadata["min_ms"] = runtime_stats["min_ms"]
                self.res.metadata["max_ms"] = runtime_stats["max_ms"]
                self.res.metadata["rel_diffs"] = runtime_stats["rel_diffs"]

                summary_profile_path = os.path.join(artifact_dir, f"{new_profile_name}_summary_profile.json")
                summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
                os.system(summary_profile_cmd)
                # Close the summary file deterministically (was a leaked open()).
                with open(summary_profile_path, 'r') as fp:
                    summary = json.load(fp)
                profile_result = summary[next(iter(summary))]
                for field in save_fields:
                    if field in profile_result.keys():
                        self.res.metadata[field] = profile_result[field]
            except Exception as e:
                print(f"Benchmarking failure. Error: {e}")
                self.res.metadata["benchmarking_error"] = traceback.format_exc()
                return self.res
        return self.res
kernels/adamw_M10944_N2048_0.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
+ @nki.jit
10
+ def kernel(v1, v2, v3, v4):
11
+ import numpy as np
12
+ import neuronxcc.nki as nki
13
+ import neuronxcc.nki.language as nl
14
+ import neuronxcc.nki.typing as nt
15
+ import neuronxcc.nki.isa as nisa
16
+ from neuronxcc.nki import trace
17
+ from neuronxcc.nki.language import par_dim
18
+ v5 = nl.ndarray((10944, 2048), dtype=np.float32, buffer=nl.shared_hbm)
19
+ v6 = nl.ndarray((nl.par_dim(128), 1), dtype=np.float32, name='memset.643', buffer=nl.sbuf)
20
+ v7 = nl.ndarray((86, 2, nl.par_dim(128), 1024), dtype=np.float32, name='theta_local_608', buffer=nl.sbuf)
21
+ v8 = nl.ndarray((86, 2, nl.par_dim(128), 1024), dtype=np.float32, name='m_local_602', buffer=nl.sbuf)
22
+ v9 = nl.ndarray((86, 2, nl.par_dim(128), 1024), dtype=np.float32, name='v_local_596', buffer=nl.sbuf)
23
+ v10 = nl.ndarray((86, 2, nl.par_dim(128), 1024), dtype=np.float32, name='g_local_590', buffer=nl.sbuf)
24
+ v11 = nl.ndarray((86, 2, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
25
+ v12 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
26
+ v13 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
27
+ v14 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
28
+ v15 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
29
+ v16 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
30
+ v17 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
31
+ v18 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
32
+ v19 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
33
+ v20 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
34
+ v21 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
35
+ v22 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
36
+ v23 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
37
+ v24 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
38
+ v25 = nl.ndarray((86, 2, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
39
+ v6[nl.arange(128)[:, None], 0] = nisa.memset(shape=(128, 1), value=np.dtype(np.uint16).type(0), dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
40
+ for i0 in nl.affine_range(86):
41
+ for i1 in nl.affine_range(2):
42
+ v7[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v1[128 * i0 + nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0)
43
+ v8[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v3[128 * i0 + nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0)
44
+ v9[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v4[128 * i0 + nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0)
45
+ v10[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[128 * i0 + nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0)
46
+ for i2 in nl.affine_range(2):
47
+ v12[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v7[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(1e-05), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
48
+ v13[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.subtract(v7[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], v12[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
49
+ v14[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v8[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.9), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
50
+ v15[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v10[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.1), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
51
+ v16[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.add(v14[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], v15[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
52
+ v17[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v16[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.01), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
53
+ v18[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v9[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.999), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
54
+ v19[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v10[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.001), reverse0=True, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
55
+ v20[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.multiply(v19[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], v10[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
56
+ v21[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.add(v18[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], v20[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
57
+ v22[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.sqrt, data=v21[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], bias=v6[nl.arange(128)[:, None], 0], scale=1000.0, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
58
+ v23[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v22[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.add, operand0=np.dtype(np.float32).type(1e-08), reverse0=False, dtype=np.float32, mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, engine=nki.isa.unknown_engine)
59
+ v24[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.reciprocal(data=v23[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
60
+ v25[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.multiply(v17[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], v24[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
61
+ v11[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]] = nl.subtract(v13[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], v25[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0, dtype=np.float32)
62
+ ' end loop i2 '
63
+ nl.store(v5[128 * i0 + nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], value=v11[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=-128 * i0 + -1 * nl.arange(128)[:, None] + 10943 >= 0)
64
+ ' end loop i1 '
65
+ ' end loop i0 '
66
+ return v5
kernels/add_rmsnorm_matmul_M4096_N2048_K1024_0.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+
5
+
@nki.jit
def kernel(x_tensor, w_tensor, eps, z_tensor, g_tensor):
    """Fused residual-add + RMSNorm + matmul, specialized for M=4096, N=2048, K=1024.

    Computes, per 128-row tile: a = x + z; a_norm = a * rsqrt(mean(a^2) + eps) * g;
    result = a_norm @ w. Shapes: x, z are (M, K); w is (K, N); g is (K,);
    result is (M, N) in shared HBM.
    """
    # Specialized for M=4096, N=2048, K=1024
    M, K, N = 4096, 1024, 2048
    # Verify input shapes match our specialization
    assert x_tensor.shape == (M, K)
    assert w_tensor.shape == (K, N)
    assert z_tensor.shape == (M, K)
    assert g_tensor.shape == (K,)

    TILE_M = 128
    TILE_K = 128
    TILE_N = 512  # nl.tile_size.gemm_moving_fmax
    # Generate tensor indices to index input tensor
    ix = nl.arange(TILE_M)[:, None]
    iw = nl.arange(1)[:, None]
    iy = nl.arange(K)[None, :]
    iz = nl.arange(TILE_N)[None, :]

    result = nl.ndarray((M, N), dtype=x_tensor.dtype, buffer=nl.shared_hbm)
    # Load RMSNorm weight once, reused by all row tiles of x_tensor.
    g_tile = nl.load(g_tensor.reshape((1, K))[iw, iy])

    # Process 128 rows at a time due to the 128-partition tile size limit.
    # There is no reduction across the first dimension, so row tiles are
    # independent and the outer loop can be an affine_range.
    for i in nl.affine_range(32):  # 4096 / 128 = 32 iterations

        # Load input data from external memory to on-chip memory
        x_tile = nl.load(x_tensor[i * TILE_M + ix, iy])
        z_tile = nl.load(z_tensor[i * TILE_M + ix, iy])

        # Residual add before normalization.
        a_tile = nl.add(x_tile, z_tile)

        # Compute element-wise square of the summed tile
        in_square = nl.square(a_tile)

        # Sum of squared elements along the last (K) dimension
        square_sum = nl.sum(in_square, axis=[1])

        # Mean of squares, stabilized with eps
        mean = square_sum / K
        mean = nl.add(mean, eps)

        # Reciprocal square root via the rsqrt API (one ISA instruction)
        rms_reciprocal = nl.rsqrt(mean)

        # Scale the input tensor by the per-row RMS reciprocal
        rmsnorm_out_tile = nl.multiply(a_tile, rms_reciprocal)

        # Broadcast the (1, K) weight along rows to match the tile shape
        g_bcast = g_tile.broadcast_to((TILE_M, K))

        # Multiply with the RMSNorm weight
        rmsnorm_out_tile[...] = nl.multiply(rmsnorm_out_tile, g_bcast)

        # Matmul against w in (TILE_N)-wide output strips, accumulating the
        # K contraction in PSUM over 128-wide K tiles.
        for n in nl.affine_range(4):  # 2048 / 512 = 4 iterations
            res_psum = nl.zeros((TILE_M, TILE_N), nl.float32, buffer=nl.psum)
            for k in nl.affine_range(8):  # 1024 / 128 = 8 iterations
                w_tile = nl.load(w_tensor[k * TILE_K:(k + 1) * TILE_K, n * TILE_N:(n + 1) * TILE_N])
                res_psum += nl.matmul(rmsnorm_out_tile[:, k * TILE_K: (k + 1) * TILE_K], w_tile)
            # Copy PSUM accumulation to SBUF in the output dtype, then store.
            res_sb = nl.copy(res_psum, dtype=result.dtype)
            nl.store(result[i * TILE_M + ix, n * TILE_N + iz], value=res_sb)

    return result
74
+
75
+
76
+
77
+
78
+
79
+
80
+
kernels/bmm_B16_M4096_K64_N4096_0.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
@nki.jit
def kernel(v1, v2):
    """Generated NKI batched matmul for B=16, M=4096, K=64, N=4096.

    Per the indexing below, v1 is loaded as (16, 4096, 64) and v2 as
    (16, 64, 4096). Each (128, 64) tile of v1 is first transposed to (64, 128)
    via an identity-matrix nc_matmul (is_transpose=True), then used as the
    stationary operand against 512-wide strips of v2. Output v3 is the tiled
    result, shaped (16, 32, 128, 4096) = (batch, row-tile, 128 rows, N).
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output in HBM, row-tiled: (batch, M/128, 128, N).
    v3 = nl.ndarray((16, 32, 128, 4096), dtype=np.float32, buffer=nl.shared_hbm)
    # 128x128 identity used to transpose lhs tiles on the tensor engine.
    v4 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v5 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_77', buffer=nl.sbuf)
    # SBUF staging: rhs strips, lhs tiles, transposed lhs, and result tiles.
    v6 = nl.ndarray((16, 4, nl.par_dim(64), 1024), dtype=np.float32, name='rhs_local_45', buffer=nl.sbuf)
    v7 = nl.ndarray((4, 16, 4, 8, nl.par_dim(128), 64), dtype=np.float32, name='', buffer=nl.sbuf)
    v8 = nl.zeros((16, 4, 4, 8, nl.par_dim(64), 128), dtype=np.float32, name='34.73', buffer=nl.psum, lazy_initialization=True)
    v9 = nl.ndarray((4, 16, 4, nl.par_dim(64), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v10 = nl.zeros((16, 4, 4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v11 = nl.ndarray((16, 4, 8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v5[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v4[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    # i0: batch; i1: 1024-wide strip of N; i2/i3: 1024/128-row tiles of M.
    for i0 in nl.affine_range(16):
        for i1 in nl.affine_range(4):
            # Load a (64, 1024) strip of the rhs for this batch.
            v6[i0, i1, nl.arange(64)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i0, nl.arange(64)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            for i2 in nl.affine_range(4):
                for i3 in nl.affine_range(8):
                    # Load a (128, 64) lhs tile and transpose it to (64, 128)
                    # through the identity matmul.
                    v7[i1, i0, i2, i3, nl.arange(128)[:, None], nl.arange(64)[None, :]] = nl.load(v1[i0, 1024 * i2 + 128 * i3 + nl.arange(128)[:, None], nl.arange(64)[None, :]], dtype=np.float32, mask=None)
                    v8[i0, i1, i2, i3, nl.arange(64)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v7[i1, i0, i2, i3, nl.arange(128)[:, None], nl.arange(64)[None, :]], v5[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v9[i1, i0, i2, nl.arange(64)[:, None], 128 * i3 + nl.arange(128)[None, :]] = nl.copy(v8[i0, i1, i2, i3, nl.arange(64)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                ' end loop i3 '
                # i4: 128-row output tile; i5: 512-wide half of the strip.
                for i4 in nl.affine_range(8):
                    for i5 in nl.affine_range(2):
                        v10[i0, i1, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.nc_matmul(v9[i1, i0, i2, nl.arange(64)[:, None], 128 * i4 + nl.arange(128)[None, :]], v6[i0, i1, nl.arange(64)[:, None], 512 * i5 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                        v11[i0, i2, i4, i1, nl.arange(128)[:, None], 512 * i5 + nl.arange(512)[None, :]] = nl.copy(v10[i0, i1, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
                    ' end loop i5 '
                    # Store one (128, 1024) result tile back to HBM.
                    nl.store(v3[i0, i4 + 8 * i2, nl.arange(128)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], value=v11[i0, i2, i4, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
                ' end loop i4 '
            ' end loop i2 '
        ' end loop i1 '
    ' end loop i0 '
    return v3
kernels/bmm_softmax_B16_K64_M4096_N4096_0.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2):
    """Compiler-generated NKI kernel: batched matmul followed by row-wise softmax.

    From the index patterns below, v1 is read as (16, 4096, 64) and v2 as
    (16, 64, 4096); per batch the (4096, 64) @ (64, 4096) product is formed,
    then every row is softmax-normalized (running max, exp(x - max), row sum,
    reciprocal, scale) and stored tiled as (16, 32, 128, 4096).
    NOTE(review): shape reading inferred from the indexing — confirm against
    the reference implementation.  Generated variable names and the bare
    ' end loop ' marker strings are kept verbatim.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output in HBM plus SBUF/PSUM staging buffers; par_dim marks the
    # 128-lane partition axis of each on-chip tile.
    v3 = nl.ndarray((16, 32, 128, 4096), dtype=np.float32, buffer=nl.shared_hbm)
    # 128x128 identity, used with nc_matmul(..., is_transpose=True) to
    # transpose tiles on the tensor engine.
    v4 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v5 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_140', buffer=nl.sbuf)
    v6 = nl.ndarray((16, 4, nl.par_dim(64), 1024), dtype=np.float32, name='rhs_local_89', buffer=nl.sbuf)
    v7 = nl.ndarray((4, 16, 4, 8, nl.par_dim(128), 64), dtype=np.float32, name='', buffer=nl.sbuf)
    v8 = nl.zeros((16, 4, 4, 8, nl.par_dim(64), 128), dtype=np.float32, name='73.136', buffer=nl.psum, lazy_initialization=True)
    v9 = nl.ndarray((4, 16, 4, nl.par_dim(64), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v10 = nl.zeros((16, 4, 4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v11 = nl.ndarray((4, 8, 16, 4, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v12 = nl.ndarray((16, 4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v13 = nl.ndarray((16, 4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v14 = nl.ndarray((4, 8, 16, 4, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v15 = nl.ndarray((16, 4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v16 = nl.ndarray((16, 4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v17 = nl.ndarray((16, 4, 8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v18 = nl.ndarray((16, 4, 4, 8, 2, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v19 = nl.ndarray((16, 4, 8, 4, 2, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    # Stage the identity matrix into SBUF once.
    v5[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v4[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    for i0 in nl.affine_range(16):
        for i1 in nl.affine_range(4):
            # Load a (64, 1024) column strip of v2 for this batch.
            v6[i0, i1, nl.arange(64)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i0, nl.arange(64)[:, None], 1024 * i1 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            for i2 in nl.affine_range(4):
                for i3 in nl.affine_range(8):
                    # Load a (128, 64) tile of v1 and transpose it to (64, 128)
                    # via the identity matmul on the tensor engine.
                    v7[i1, i0, i2, i3, nl.arange(128)[:, None], nl.arange(64)[None, :]] = nl.load(v1[i0, 1024 * i2 + 128 * i3 + nl.arange(128)[:, None], nl.arange(64)[None, :]], dtype=np.float32, mask=None)
                    v8[i0, i1, i2, i3, nl.arange(64)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v7[i1, i0, i2, i3, nl.arange(128)[:, None], nl.arange(64)[None, :]], v5[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v9[i1, i0, i2, nl.arange(64)[:, None], 128 * i3 + nl.arange(128)[None, :]] = nl.copy(v8[i0, i1, i2, i3, nl.arange(64)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                    ' end loop i3 '
                for i4 in nl.affine_range(8):
                    for i5 in nl.affine_range(2):
                        # Contract the 64-deep axis: transposed v1 tile @ v2 strip.
                        v10[i0, i1, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.nc_matmul(v9[i1, i0, i2, nl.arange(64)[:, None], 128 * i4 + nl.arange(128)[None, :]], v6[i0, i1, nl.arange(64)[:, None], 512 * i5 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                        v11[i2, i4, i0, i1, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.copy(v10[i0, i1, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
                        # Running per-row maximum across the i1/i5 column chunks.
                        v18[i0, i1, i2, i4, i5, nl.arange(128)[:, None], 0] = nisa.tensor_reduce(nl.max, data=v11[i2, i4, i0, i1, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, axis=[1], dtype=np.float32, negate=False)
                        v12[i0, i2, i4, nl.arange(128)[:, None], 0] = nl.loop_reduce(v18[i0, i1, i2, i4, i5, nl.arange(128)[:, None], 0], op=np.max, loop_indices=[i1, i5], mask=None, dtype=np.float32)
                        ' end loop i5 '
                    ' end loop i4 '
                ' end loop i2 '
            ' end loop i1 '
        for i6 in nl.affine_range(4):
            for i7 in nl.affine_range(8):
                # Negated row max (clamped to -FLT_MAX), fed as bias so the
                # activation computes exp(x - max).
                v13[i0, i6, i7, nl.arange(128)[:, None], 0] = nisa.tensor_scalar(data=v12[i0, i6, i7, nl.arange(128)[:, None], 0], op0=nl.maximum, operand0=np.dtype(np.float32).type(-3.4028235e+38), reverse0=False, op1=nl.multiply, operand1=np.dtype(np.float32).type(-1.0), reverse1=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                for i8 in nl.affine_range(4):
                    for i9 in nl.affine_range(2):
                        v14[i6, i7, i0, i8, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.exp, data=v11[i6, i7, i0, i8, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], bias=v13[i0, i6, i7, nl.arange(128)[:, None], 0], scale=1.0, mask=None, dtype=np.float32)
                        # Row sum of the exponentials across the i8/i9 chunks.
                        v19[i0, i6, i7, i8, i9, nl.arange(128)[:, None], 0] = nisa.tensor_reduce(nl.add, data=v14[i6, i7, i0, i8, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, axis=[1], dtype=np.float32, negate=False)
                        v15[i0, i6, i7, nl.arange(128)[:, None], 0] = nl.loop_reduce(v19[i0, i6, i7, i8, i9, nl.arange(128)[:, None], 0], op=np.add, loop_indices=[i8, i9], mask=None, dtype=np.float32)
                        ' end loop i9 '
                    ' end loop i8 '
                # 1 / row-sum, then scale every exp chunk and store the row.
                v16[i0, i6, i7, nl.arange(128)[:, None], 0] = nisa.reciprocal(data=v15[i0, i6, i7, nl.arange(128)[:, None], 0], mask=None, dtype=np.float32)
                for i10 in nl.affine_range(4):
                    for i11 in nl.affine_range(2):
                        v17[i0, i6, i7, i10, nl.arange(128)[:, None], 512 * i11 + nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v14[i6, i7, i0, i10, i11, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.multiply, operand0=v16[i0, i6, i7, nl.arange(128)[:, None], 0], reverse0=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                        ' end loop i11 '
                    nl.store(v3[i0, 8 * i6 + i7, nl.arange(128)[:, None], 1024 * i10 + nl.arange(1024)[None, :]], value=v17[i0, i6, i7, i10, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
                    ' end loop i10 '
                ' end loop i7 '
            ' end loop i6 '
        ' end loop i0 '
    return v3
kernels/gqa_full_B1_N4096_QH16_KH8_D128_0.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2, v3):
    """Compiler-generated NKI kernel: grouped-query attention (GQA) forward pass.

    The indexing reads v1 as queries (32 x 128 = 4096 positions, 16 heads,
    head dim 128), v2 as keys grouped into 8 KV heads, and v3 as values;
    scores are scaled by 0.08838835... (= 1/sqrt(128)), softmax-normalized
    (running max, exp(x - max), sum, reciprocal), then multiplied with V and
    stored to v4 with shape (1, 8, 2, 32, 128, 128).
    NOTE(review): the Q/K/V role assignment is inferred from the generated
    buffer names (q_pftranspose/k_pftranspose/v_local) and the index
    patterns — confirm against the reference implementation.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output in HBM plus SBUF/PSUM staging buffers; par_dim marks the
    # 128-lane partition axis of each on-chip tile.
    v4 = nl.ndarray((1, 8, 2, 32, 128, 128), dtype=np.float32, buffer=nl.shared_hbm)
    # 128x128 identity used with nc_matmul(..., is_transpose=True) for
    # on-engine tile transposes.
    v5 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v6 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_224', buffer=nl.sbuf)
    v7 = nl.ndarray((32, 2, nl.par_dim(128), 1024), dtype=np.float32, name='146.215', buffer=nl.sbuf)
    v8 = nl.zeros((32, 2, 8, nl.par_dim(128), 128), dtype=np.float32, name='146.220', buffer=nl.psum, lazy_initialization=True)
    v9 = nl.ndarray((2, 32, nl.par_dim(128), 8, 128), dtype=np.float32, name='q_pftranspose_146', buffer=nl.sbuf)
    v10 = nl.ndarray((2, 4, 4, nl.par_dim(128), 1024), dtype=np.float32, name='150.229', buffer=nl.sbuf)
    v11 = nl.zeros((2, 4, 4, 8, nl.par_dim(128), 128), dtype=np.float32, name='150.234', buffer=nl.psum, lazy_initialization=True)
    v12 = nl.ndarray((2, 4, nl.par_dim(128), 8, 512), dtype=np.float32, name='k_pftranspose_150', buffer=nl.sbuf)
    v13 = nl.zeros((32, 2, 4, 2, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v14 = nl.ndarray((32, 2, 4, 2, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v15 = nl.ndarray((2, 4, 2, 32, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v16 = nl.ndarray((8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='v_local_169', buffer=nl.sbuf)
    v17 = nl.ndarray((2, 2, 4, 32, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v18 = nl.ndarray((32, 2, 4, 2, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v19 = nl.ndarray((2, 4, 2, 32, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v20 = nl.ndarray((2, 4, 2, 32, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v21 = nl.ndarray((32, 2, 4, 2, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v22 = nl.zeros((2, 2, 4, 32, 2, 4, 4, nl.par_dim(128), 128), dtype=np.float32, name='135.238', buffer=nl.psum, lazy_initialization=True)
    v23 = nl.ndarray((8, 4, 2, 4, 2, 32, nl.par_dim(128), 128), dtype=np.float32, name='t51_pftranspose_135', buffer=nl.sbuf)
    v24 = nl.zeros((2, 2, 4, 32, nl.par_dim(128), 128), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v25 = nl.ndarray((2, 4, 2, 32, nl.par_dim(128), 128), dtype=np.float32, name='', buffer=nl.sbuf)
    v26 = nl.ndarray((2, 2, 4, 4, 2, 32, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v27 = nl.ndarray((2, 2, 4, 32, 2, 4, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    # Stage the identity matrix into SBUF once.
    v6[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v5[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    # Phase 1: load Q tiles and transpose them via the identity matmul.
    for i0 in nl.affine_range(32):
        for i1 in nl.affine_range(2):
            v7[i0, i1, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[i0, nl.arange(128)[:, None, None], 8 * i1 + nl.arange(8)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
            for i2 in nl.affine_range(8):
                v8[i0, i1, i2, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v7[i0, i1, nl.arange(128)[:, None], 128 * i2 + nl.arange(128)[None, :]], v6[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                v9[i1, i0, nl.arange(128)[:, None], i2, nl.arange(128)[None, :]] = nl.copy(v8[i0, i1, i2, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                ' end loop i2 '
            ' end loop i1 '
        ' end loop i0 '
    # Phase 2: load/transpose K tiles, then compute scaled Q.K^T scores and
    # the running per-row maximum.
    for i3 in nl.affine_range(2):
        for i4 in nl.affine_range(4):
            for i5 in nl.affine_range(4):
                v10[i3, i4, i5, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v2[0, i4 + 4 * i3, i5, nl.arange(128)[:, None, None], nl.arange(8)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                for i6 in nl.affine_range(8):
                    v11[i3, i4, i5, i6, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v10[i3, i4, i5, nl.arange(128)[:, None], 128 * i6 + nl.arange(128)[None, :]], v6[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v12[i3, i4, nl.arange(128)[:, None], i6, 128 * i5 + nl.arange(128)[None, :]] = nl.copy(v11[i3, i4, i5, i6, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                    ' end loop i6 '
                ' end loop i5 '
            ' end loop i4 '
        for i7 in nl.affine_range(2):
            for i8 in nl.affine_range(4):
                for i9 in nl.affine_range(4):
                    for i10 in nl.affine_range(2):
                        for i11 in nl.affine_range(32):
                            v13[i11, i7, i8, i10, i3, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.nc_matmul(v9[i7, i11, nl.arange(128)[:, None], i10 + 2 * i8, nl.arange(128)[None, :]], v12[i3, i9, nl.arange(128)[:, None], i8 + 4 * i7, nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                            # Scale scores by 1/sqrt(128).
                            v14[i11, i7, i8, i10, i3, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v13[i11, i7, i8, i10, i3, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.08838835154663706), reverse0=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                            v26[i3, i7, i8, i9, i10, i11, nl.arange(128)[:, None], 0] = nisa.tensor_reduce(nl.max, data=v14[i11, i7, i8, i10, i3, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, axis=[1], dtype=np.float32, negate=False)
                            v15[i7, i8, i10, i11, nl.arange(128)[:, None], 0] = nl.loop_reduce(v26[i3, i7, i8, i9, i10, i11, nl.arange(128)[:, None], 0], op=np.max, loop_indices=[i3, i9], mask=None, dtype=np.float32)
                            ' end loop i11 '
                        ' end loop i10 '
                    ' end loop i9 '
                ' end loop i8 '
            ' end loop i7 '
        ' end loop i3 '
    # Phase 3: stage the V tiles.
    for i12 in nl.affine_range(8):
        for i13 in nl.affine_range(4):
            v16[i12, i13, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v3[0, 4 * i12 + i13, nl.arange(128)[:, None], nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            ' end loop i13 '
        ' end loop i12 '
    # Phase 4: softmax over the scores, multiply by V, and store the output.
    for i14 in nl.affine_range(2):
        for i15 in nl.affine_range(2):
            for i16 in nl.affine_range(4):
                for i17 in nl.affine_range(32):
                    # Negated row max (clamped to -FLT_MAX) as exp bias.
                    v17[i14, i15, i16, i17, nl.arange(128)[:, None], 0] = nisa.tensor_scalar(data=v15[i15, i16, i14, i17, nl.arange(128)[:, None], 0], op0=nl.maximum, operand0=np.dtype(np.float32).type(-3.4028235e+38), reverse0=False, op1=nl.multiply, operand1=np.dtype(np.float32).type(-1.0), reverse1=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                    for i18 in nl.affine_range(2):
                        for i19 in nl.affine_range(4):
                            v18[i17, i15, i16, i14, i18, i19, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.exp, data=v14[i17, i15, i16, i14, i18, i19, nl.arange(128)[:, None], nl.arange(512)[None, :]], bias=v17[i14, i15, i16, i17, nl.arange(128)[:, None], 0], scale=1.0, mask=None, dtype=np.float32)
                            v27[i14, i15, i16, i17, i18, i19, nl.arange(128)[:, None], 0] = nisa.tensor_reduce(nl.add, data=v18[i17, i15, i16, i14, i18, i19, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, axis=[1], dtype=np.float32, negate=False)
                            v19[i15, i16, i14, i17, nl.arange(128)[:, None], 0] = nl.loop_reduce(v27[i14, i15, i16, i17, i18, i19, nl.arange(128)[:, None], 0], op=np.add, loop_indices=[i19, i18], mask=None, dtype=np.float32)
                            ' end loop i19 '
                        ' end loop i18 '
                    # 1 / row-sum of exponentials.
                    v20[i15, i16, i14, i17, nl.arange(128)[:, None], 0] = nisa.reciprocal(data=v19[i15, i16, i14, i17, nl.arange(128)[:, None], 0], mask=None, dtype=np.float32)
                    for i20 in nl.affine_range(2):
                        for i21 in nl.affine_range(4):
                            # Normalize the attention weights, then transpose
                            # them for the weights @ V matmul.
                            v21[i17, i15, i16, i14, 4 * i20 + i21, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v18[i17, i15, i16, i14, i20, i21, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.multiply, operand0=v20[i15, i16, i14, i17, nl.arange(128)[:, None], 0], reverse0=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                            for i22 in nl.affine_range(4):
                                v22[i14, i15, i16, i17, i20, i21, i22, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v21[i17, i15, i16, i14, 4 * i20 + i21, nl.arange(128)[:, None], 128 * i22 + nl.arange(128)[None, :]], v6[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                                v23[4 * i20 + i21, i22, i15, i16, i14, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.copy(v22[i14, i15, i16, i17, i20, i21, i22, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                                ' end loop i22 '
                            ' end loop i21 '
                        ' end loop i20 '
                    # Accumulate weights @ V over the sequence tiles in PSUM.
                    for i23 in nl.affine_range(8):
                        for i24 in nl.affine_range(4):
                            v24[i14, i15, i16, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]] += nisa.nc_matmul(v23[i23, i24, i15, i16, i14, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]], v16[i23, i24, nl.arange(128)[:, None], 128 * i16 + 512 * i15 + nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                            ' end loop i24 '
                        ' end loop i23 '
                    v25[i15, i16, i14, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.copy(v24[i14, i15, i16, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                    nl.store(v4[0, i16 + 4 * i15, i14, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]], value=v25[i15, i16, i14, i17, nl.arange(128)[:, None], nl.arange(128)[None, :]], mask=None)
                    ' end loop i17 '
                ' end loop i16 '
            ' end loop i15 '
        ' end loop i14 '
    return v4
kernels/lora_M4096_N12288_K5120_R128_0.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2, v3, v4):
    """Compiler-generated NKI kernel: LoRA-augmented matmul.

    Two paths are computed and summed before the final store: a low-rank
    path (v3 acts as the down-projection to rank 128 via the v12
    accumulation, v4 as the (128, 12288) up-projection in v15), and a base
    path contracting v1 against the v2 weight tiles into v21.  The sum
    v22 + v16 is transposed through the identity matmul and written to v5
    with shape (8, 4, 128, 96, 128).
    NOTE(review): the base/LoRA role assignment is inferred from the buffer
    names (a_local, w_local) and shapes — confirm against the reference.
    Generated variable names and the bare ' end loop ' markers are verbatim.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output in HBM plus SBUF/PSUM staging buffers; par_dim marks the
    # 128-lane partition axis of each on-chip tile.
    v5 = nl.ndarray((8, 4, 128, 96, 128), dtype=np.float32, buffer=nl.shared_hbm)
    # 128x128 identity used with nc_matmul(..., is_transpose=True).
    v6 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v7 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_111', buffer=nl.sbuf)
    v8 = nl.ndarray((5, 8, nl.par_dim(128), 128), dtype=np.float32, name='a_local_56', buffer=nl.sbuf)
    v9 = nl.ndarray((2, 4, 5, 4, nl.par_dim(128), 1024), dtype=np.float32, name='38.102', buffer=nl.sbuf)
    v10 = nl.zeros((2, 4, 5, 4, 8, nl.par_dim(128), 128), dtype=np.float32, name='38.107', buffer=nl.psum, lazy_initialization=True)
    v11 = nl.ndarray((2, 4, 5, nl.par_dim(128), 8, 512), dtype=np.float32, name='x_pftranspose_38', buffer=nl.sbuf)
    v12 = nl.zeros((2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v13 = nl.ndarray((2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v14 = nl.ndarray((2, 12, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v15 = nl.zeros((2, 12, 4, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v16 = nl.ndarray((12, 2, 4, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v17 = nl.ndarray((12, 4, 2, 5, nl.par_dim(128), 1024), dtype=np.float32, name='w_local_73', buffer=nl.sbuf)
    v18 = nl.ndarray((12, 4, 2, 4, 2, 4, nl.par_dim(128), 640), dtype=np.float32, name='', buffer=nl.sbuf)
    v19 = nl.zeros((12, 4, 2, 4, 2, 4, 5, nl.par_dim(128), 128), dtype=np.float32, name='38.122', buffer=nl.psum, lazy_initialization=True)
    v20 = nl.ndarray((12, 2, 4, 4, 2, nl.par_dim(128), 5, 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v21 = nl.zeros((12, 4, 2, 4, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v22 = nl.full((12, 2, 4, 2, 4, nl.par_dim(128), 512), fill_value=np.dtype(np.float32).type(0), dtype=np.float32, name='', buffer=nl.sbuf)
    v23 = nl.ndarray((2, 4, 12, 2, 4, nl.par_dim(128), 512), dtype=np.float32, name='.o0_pftranspose_42', buffer=nl.sbuf)
    v24 = nl.zeros((12, 2, 4, 2, 4, 4, nl.par_dim(128), 128), dtype=np.float32, name='42.130', buffer=nl.psum, lazy_initialization=True)
    v25 = nl.ndarray((12, 2, 4, 2, 4, 4, nl.par_dim(128), 128), dtype=np.float32, name='42.143', buffer=nl.sbuf)
    # Stage the identity matrix into SBUF once.
    v7[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v6[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    # Stage the 40 (128, 128) tiles of v3 (the rank-128 projection).
    for i0 in nl.affine_range(5):
        for i1 in nl.affine_range(8):
            v8[i0, i1, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v3[8 * i0 + i1, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
            ' end loop i1 '
        ' end loop i0 '
    # Low-rank path: transpose x tiles, contract against v8 into a rank-128
    # intermediate (v13), then up-project with v4 into v16.
    for i2 in nl.affine_range(2):
        for i3 in nl.affine_range(4):
            for i4 in nl.affine_range(5):
                for i5 in nl.affine_range(4):
                    v9[i2, i3, i4, i5, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[i3 + 4 * i2, i5, nl.arange(128)[:, None, None], 8 * i4 + nl.arange(8)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                    for i6 in nl.affine_range(8):
                        v10[i2, i3, i4, i5, i6, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v9[i2, i3, i4, i5, nl.arange(128)[:, None], 128 * i6 + nl.arange(128)[None, :]], v7[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                        v11[i2, i3, i4, nl.arange(128)[:, None], i6, 128 * i5 + nl.arange(128)[None, :]] = nl.copy(v10[i2, i3, i4, i5, i6, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                        ' end loop i6 '
                    ' end loop i5 '
                ' end loop i4 '
            # Accumulate the contraction over all 40 tiles into PSUM.
            for i7 in nl.affine_range(5):
                for i8 in nl.affine_range(8):
                    v12[i2, i3, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v8[i7, i8, nl.arange(128)[:, None], nl.arange(128)[None, :]], v11[i2, i3, i7, nl.arange(128)[:, None], i8, nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                    ' end loop i8 '
                ' end loop i7 '
            v13[i2, i3, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.copy(v12[i2, i3, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
            ' end loop i3 '
        # Up-project the rank-128 intermediate with v4 (128 x 12288).
        for i9 in nl.affine_range(12):
            v14[i2, i9, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v4[nl.arange(128)[:, None], 1024 * i9 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            for i10 in nl.affine_range(4):
                for i11 in nl.affine_range(2):
                    for i12 in nl.affine_range(4):
                        v15[i2, i9, i10, i11, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.nc_matmul(v14[i2, i9, nl.arange(128)[:, None], 512 * i11 + 128 * i12 + nl.arange(128)[None, :]], v13[i2, i10, nl.arange(128)[:, None], nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                        v16[i9, i11, i12, i2, i10, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.copy(v15[i2, i9, i10, i11, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
                        ' end loop i12 '
                    ' end loop i11 '
                ' end loop i10 '
            ' end loop i9 '
        ' end loop i2 '
    # Base path: contract v1 against the v2 weight tiles, accumulating the
    # i14 (sequential) partial sums into v22 via loop_reduce.
    for i13 in nl.affine_range(12):
        for i14 in nl.sequential_range(4):
            for i15 in nl.affine_range(2):
                for i16 in nl.affine_range(5):
                    v17[i13, i14, i15, i16, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i16 + 5 * i15 + 10 * i14, nl.arange(128)[:, None], 1024 * i13 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
                    ' end loop i16 '
                ' end loop i15 '
            for i17 in nl.affine_range(2):
                for i18 in nl.affine_range(4):
                    for i19 in nl.affine_range(2):
                        for i20 in nl.affine_range(4):
                            v18[i13, i14, i17, i18, i19, i20, nl.arange(128)[:, None, None], 128 * nl.arange(5)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[4 * i17 + i18, i20, nl.arange(128)[:, None, None], 5 * i19 + 10 * i14 + nl.arange(5)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                            for i21 in nl.affine_range(5):
                                v19[i13, i14, i17, i18, i19, i20, i21, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v18[i13, i14, i17, i18, i19, i20, nl.arange(128)[:, None], 128 * i21 + nl.arange(128)[None, :]], v7[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                                v20[i13, i17, i18, i14, i19, nl.arange(128)[:, None], i21, 128 * i20 + nl.arange(128)[None, :]] = nl.copy(v19[i13, i14, i17, i18, i19, i20, i21, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                                ' end loop i21 '
                            ' end loop i20 '
                        ' end loop i19 '
                    for i22 in nl.affine_range(2):
                        for i23 in nl.affine_range(4):
                            for i24 in nl.affine_range(2):
                                for i25 in nl.affine_range(5):
                                    v21[i13, i14, i17, i18, i22, i23, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v17[i13, i14, i24, i25, nl.arange(128)[:, None], 128 * i23 + 512 * i22 + nl.arange(128)[None, :]], v20[i13, i17, i18, i14, i24, nl.arange(128)[:, None], i25, nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                                    ' end loop i25 '
                                ' end loop i24 '
                            v22[i13, i22, i23, i17, i18, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.loop_reduce(v21[i13, i14, i17, i18, i22, i23, nl.arange(128)[:, None], nl.arange(512)[None, :]], op=np.add, loop_indices=[i14], mask=None, dtype=np.float32)
                            ' end loop i23 '
                        ' end loop i22 '
                    ' end loop i18 '
                ' end loop i17 '
            ' end loop i14 '
        # Combine base + low-rank, transpose, and store to HBM.
        for i26 in nl.affine_range(2):
            for i27 in nl.affine_range(4):
                for i28 in nl.affine_range(2):
                    for i29 in nl.affine_range(4):
                        v23[i26, i27, i13, i28, i29, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.add(v22[i13, i28, i29, i26, i27, nl.arange(128)[:, None], nl.arange(512)[None, :]], v16[i13, i28, i29, i26, i27, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
                        ' end loop i29 '
                    for i30 in nl.affine_range(4):
                        for i31 in nl.affine_range(4):
                            v24[i13, i26, i27, i28, i30, i31, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v23[i26, i27, i13, i28, i30, nl.arange(128)[:, None], 128 * i31 + nl.arange(128)[None, :]], v7[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                            v25[i13, i26, i27, i28, i30, i31, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.copy(v24[i13, i26, i27, i28, i30, i31, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                            nl.store(v5[4 * i26 + i27, i31, nl.arange(128)[:, None], i30 + 4 * i28 + 8 * i13, nl.arange(128)[None, :]], value=v25[i13, i26, i27, i28, i30, i31, nl.arange(128)[:, None], nl.arange(128)[None, :]], mask=None)
                            ' end loop i31 '
                        ' end loop i30 '
                    ' end loop i28 '
                ' end loop i27 '
            ' end loop i26 '
        ' end loop i13 '
    return v5
kernels/mamba_M7168_C256_S16_0.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import neuronxcc.nki as nki
2
+ import neuronxcc.nki.language as nl
3
+ import neuronxcc.nki.isa as nisa
4
+ import numpy as np
5
+
6
@nki.jit
def kernel(delta, u, a, b, c):
    """Selective-scan (Mamba-style) NKI kernel over (channels, seq_len) inputs.

    For each state index s it forms, per 128-channel tile,
    exp(delta * a[:, s]) and delta * u * b[s, :], runs an associative
    multiply/add scan along the sequence axis, scales the scan output by
    c[s, :], and accumulates the per-state contributions into the result.
    """
    n_chan, n_seq = delta.shape
    _, n_state = a.shape

    result = nl.ndarray((n_chan, n_seq), dtype=delta.dtype,
                        buffer=nl.shared_hbm)

    # Channels map onto the 128-lane partition axis, one full tile at a time.
    # (This restriction could be relaxed via the mask parameter of the NKI
    # calls.)
    tile_p = nl.tile_size.pmax
    assert n_chan % tile_p == 0
    num_tiles = n_chan // tile_p

    # Running sum of the per-state scan contributions.
    accum = nl.zeros((num_tiles, nl.par_dim(tile_p), n_seq), dtype=delta.dtype)

    for s in nl.affine_range(n_state):
        for t in nl.affine_range(num_tiles):
            base = t * tile_p

            # Stage every operand tile needed for this (state, tile) pair.
            delta_tile = nl.load(delta[base:base + tile_p, 0:n_seq])
            a_col = nl.load(a[base:base + tile_p, s])
            u_tile = nl.load(u[base:base + tile_p, 0:n_seq])
            b_row = nl.load(b[s:s + 1, 0:n_seq])
            c_row = nl.load(c[s:s + 1, 0:n_seq])

            # exp(delta * a[:, s]) -- the multiplicative term of the scan.
            delta_a = nisa.activation(op=nl.exp, data=delta_tile, scale=a_col)

            # delta * u * b[s, :] -- the additive term of the scan.
            du = nisa.tensor_tensor(delta_tile, u_tile, op=nl.multiply)
            dbu = nisa.tensor_tensor(du, b_row.broadcast_to((tile_p, n_seq)),
                                     op=nl.multiply)

            # Associative scan along the sequence axis:
            # h[i] = h[i-1] * delta_a[i] + dbu[i], starting from 0.
            scanned = nisa.tensor_tensor_scan(delta_a, dbu, initial=0,
                                              op0=np.multiply, op1=np.add)

            # Scale by c[s, :] and fold into the running accumulator.
            scaled = nisa.tensor_tensor(
                scanned, c_row.broadcast_to((tile_p, n_seq)), op=nl.multiply)
            accum[t, 0:tile_p, 0:n_seq] += scaled

    # Copy the accumulated result out to HBM, one channel tile at a time.
    for t in nl.affine_range(num_tiles):
        base = t * tile_p
        nl.store(result[base:base + tile_p, 0:n_seq],
                 accum[t, 0:tile_p, 0:n_seq])

    return result
kernels/matmul_M4096_N12288_K5120_0.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2):
    """Compiler-generated NKI kernel: dense matmul.

    The indexing reads v1 as a tiled (4096, 5120) left operand
    (32 x 128 rows, 40 x 128 contraction columns) and v2 as a tiled
    (5120, 12288) right operand; the (4096, 12288) product is stored to v3
    as (32, 128, 12288).  Left tiles are transposed on the tensor engine
    via an identity matmul before the contraction.
    NOTE(review): shape reading inferred from indexing — confirm against
    the reference implementation.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output in HBM plus SBUF/PSUM staging buffers; par_dim marks the
    # 128-lane partition axis of each on-chip tile.
    v3 = nl.ndarray((32, 128, 12288), dtype=np.float32, buffer=nl.shared_hbm)
    # 128x128 identity used with nc_matmul(..., is_transpose=True).
    v4 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v5 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_80', buffer=nl.sbuf)
    v6 = nl.ndarray((12, 5, 8, nl.par_dim(128), 1024), dtype=np.float32, name='rhs_local_49', buffer=nl.sbuf)
    v7 = nl.ndarray((12, 32, 5, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v8 = nl.zeros((12, 32, 5, 8, nl.par_dim(128), 128), dtype=np.float32, name='38.76', buffer=nl.psum, lazy_initialization=True)
    v9 = nl.ndarray((12, 32, 5, nl.par_dim(128), 8, 128), dtype=np.float32, name='', buffer=nl.sbuf)
    v10 = nl.zeros((12, 32, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v11 = nl.ndarray((32, 12, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    # Stage the identity matrix into SBUF once.
    v5[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v4[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    for i0 in nl.affine_range(12):
        # Load the 40 right-hand tiles for this 1024-wide output column strip.
        for i1 in nl.affine_range(5):
            for i2 in nl.affine_range(8):
                v6[i0, i1, i2, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i2 + 8 * i1, nl.arange(128)[:, None], nl.arange(1024)[None, :] + 1024 * i0], dtype=np.float32, mask=None)
                ' end loop i2 '
            ' end loop i1 '
        for i3 in nl.affine_range(32):
            # Load and transpose the left-operand tiles for this row block.
            for i4 in nl.affine_range(5):
                v7[i0, i3, i4, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[i3, nl.arange(128)[:, None, None], nl.arange(8)[None, :, None] + 8 * i4, nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                for i5 in nl.affine_range(8):
                    v8[i0, i3, i4, i5, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v7[i0, i3, i4, nl.arange(128)[:, None], 128 * i5 + nl.arange(128)[None, :]], v5[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v9[i0, i3, i4, nl.arange(128)[:, None], i5, nl.arange(128)[None, :]] = nl.copy(v8[i0, i3, i4, i5, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                    ' end loop i5 '
                ' end loop i4 '
            # Accumulate the contraction over all 40 tiles into PSUM.
            for i6 in nl.affine_range(2):
                for i7 in nl.affine_range(5):
                    for i8 in nl.affine_range(8):
                        v10[i0, i3, i6, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v9[i0, i3, i7, nl.arange(128)[:, None], i8, nl.arange(128)[None, :]], v6[i0, i7, i8, nl.arange(128)[:, None], 512 * i6 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                        ' end loop i8 '
                    ' end loop i7 '
                v11[i3, i0, nl.arange(128)[:, None], 512 * i6 + nl.arange(512)[None, :]] = nl.copy(v10[i0, i3, i6, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
                ' end loop i6 '
            nl.store(v3[i3, nl.arange(128)[:, None], nl.arange(1024)[None, :] + 1024 * i0], value=v11[i3, i0, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
            ' end loop i3 '
        ' end loop i0 '
    return v3
kernels/matmul_add_rmsnorm_M4096_N2048_K2048_0.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import neuronxcc.nki as nki
2
+ import neuronxcc.nki.language as nl
3
+
4
+
5
+ @nki.jit
6
+ def kernel(x_tensor, w_tensor, eps, z_tensor, g_tensor):
7
+ M, K = x_tensor.shape
8
+ K_, N = w_tensor.shape
9
+ # Make sure shapes match
10
+ assert N == g_tensor.shape[0]
11
+ assert K_ == K
12
+ assert N == w_tensor.shape[1]
13
+ assert M == z_tensor.shape[0]
14
+ assert N == z_tensor.shape[1]
15
+
16
+ TILE_M = 128
17
+ TILE_K = 128
18
+ TILE_N = 512 # nl.tile_size.gemm_moving_fmax
19
+ ix = nl.arange(TILE_M)[:, None]
20
+ iw = nl.arange(1)[:, None]
21
+ iy = nl.arange(N)[None, :]
22
+ iz = nl.arange(TILE_N)[None, :]
23
+ ik = nl.arange(K)[None, :]
24
+
25
+ result = nl.ndarray((M, N), dtype=x_tensor.dtype, buffer=nl.shared_hbm)
26
+ g_tile = nl.load(g_tensor.reshape((1, N))[iw, iy])
27
+ for i in nl.affine_range(M // TILE_M):
28
+ rmsnorm_in_tile = nl.ndarray((TILE_M, N), dtype=x_tensor.dtype, buffer=nl.sbuf)
29
+ x_tiles = nl.load(x_tensor[i * TILE_M + ix, ik])
30
+ for n in nl.affine_range(N // TILE_N):
31
+ res_psum = nl.zeros((TILE_M, TILE_N), nl.float32, buffer=nl.psum)
32
+ for k in nl.affine_range(K // TILE_K):
33
+ w_tile = nl.load(w_tensor[k * TILE_K: (k + 1) * TILE_K, n * TILE_N: (n + 1) * TILE_N])
34
+ res_psum += nl.matmul(x_tiles[:, k * TILE_K: (k + 1) * TILE_K], w_tile)
35
+ res_sb = nl.copy(res_psum, dtype=result.dtype)
36
+ rmsnorm_in_tile[ix, n * TILE_N + iz] = res_sb
37
+ z_tile = nl.load(z_tensor[i * TILE_M + ix, iy])
38
+ a_tile = nl.add(rmsnorm_in_tile, z_tile)
39
+ in_square = nl.square(a_tile)
40
+ square_sum = nl.sum(in_square, axis=[1])
41
+ mean = square_sum / N # Changed from K to N - normalize over output dimension
42
+ mean = nl.add(mean, eps)
43
+ rms_reciprocal = nl.rsqrt(mean)
44
+ rmsnorm_out_tile = nl.multiply(a_tile, rms_reciprocal)
45
+ g_bcast = g_tile.broadcast_to((TILE_M, N))
46
+ rmsnorm_out_tile[...] = nl.multiply(rmsnorm_out_tile, g_bcast)
47
+ nl.store(result[i * TILE_M + ix, iy], value=rmsnorm_out_tile)
48
+ return result
kernels/rmsnorm_matmul_M4096_N2048_K1024_0.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2):
    """RMSNorm over the last axis of v1 followed by a matmul with v2.

    Auto-generated tiled NKI kernel (M=4096, N=2048, K=1024 problem).
    v1 is read as (32, 128, 1024) tiles and v2 as (8, 128, 2048) tiles;
    the result is a (32, 128, 2048) tensor in shared HBM.
    NOTE(review): the logical row/column mapping of the tiled layouts is
    inferred from the index arithmetic below — confirm against the
    matching reference file.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    # Output plus SBUF/PSUM scratch buffers.
    v3 = nl.ndarray((32, 128, 2048), dtype=np.float32, buffer=nl.shared_hbm)
    v4 = nl.ndarray((nl.par_dim(128), 1), dtype=np.float32, name='memset.174', buffer=nl.sbuf)
    # 128x128 identity constant; used by nc_matmul to transpose tiles.
    v5 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v6 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_168', buffer=nl.sbuf)
    v7 = nl.ndarray((4, 8, nl.par_dim(128), 1024), dtype=np.float32, name='input_tensor_local_112', buffer=nl.sbuf)
    v8 = nl.ndarray((4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v9 = nl.ndarray((4, 8, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    v10 = nl.ndarray((4, 8, nl.par_dim(128), 1024), dtype=np.float32, name='input_tensor_local_118', buffer=nl.sbuf)
    v11 = nl.ndarray((4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v12 = nl.zeros((4, 8, 2, 4, nl.par_dim(128), 128), dtype=np.float32, name='93.164', buffer=nl.psum, lazy_initialization=True)
    v13 = nl.ndarray((2, 4, 4, 8, nl.par_dim(128), 128), dtype=np.float32, name='t26_pftranspose_93', buffer=nl.sbuf)
    v14 = nl.ndarray((4, 2, 2, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v15 = nl.zeros((2, 4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v16 = nl.ndarray((4, 8, 2, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v17 = nl.ndarray((4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v18 = nl.ndarray((4, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v19 = nl.ndarray((4, 8, 2, nl.par_dim(128), 1), dtype=np.float32, name='', buffer=nl.sbuf)
    # Zero bias vector reused by the activation instructions below.
    v4[nl.arange(128)[:, None], 0] = nisa.memset(shape=(128, 1), value=np.dtype(np.uint16).type(0), dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
    v6[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v5[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    for i0 in nl.affine_range(4):
        for i1 in nl.affine_range(8):
            v7[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v1[8 * i0 + i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            for i2 in nl.affine_range(2):
                # Square each element; 0.0009765625 == 1/1024 folds in the mean divisor.
                v17[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.square, data=v7[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], bias=v4[nl.arange(128)[:, None], 0], scale=1.0, mask=None, dtype=np.float32)
                v18[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v17[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.multiply, operand0=np.dtype(np.float32).type(0.0009765625), reverse0=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                v19[i0, i1, i2, nl.arange(128)[:, None], 0] = nisa.tensor_reduce(nl.add, data=v18[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, axis=[1], dtype=np.float32, negate=False)
                v8[i0, i1, nl.arange(128)[:, None], 0] = nl.loop_reduce(v19[i0, i1, i2, nl.arange(128)[:, None], 0], op=np.add, loop_indices=[i2], mask=None, dtype=np.float32)
            ' end loop i2 '
            # Per-row 1/sqrt(mean(x^2)): the RMSNorm reciprocal scale.
            v9[i0, i1, nl.arange(128)[:, None], 0] = nisa.activation(op=nl.rsqrt, data=v8[i0, i1, nl.arange(128)[:, None], 0], bias=v4[nl.arange(128)[:, None], 0], scale=1.0, mask=None, dtype=np.float32)
            v10[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v1[8 * i0 + i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
            for i3 in nl.affine_range(2):
                # Scale the reloaded input tile by its row's reciprocal RMS.
                v11[i0, i1, i3, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v10[i0, i1, nl.arange(128)[:, None], 512 * i3 + nl.arange(512)[None, :]], op0=nl.multiply, operand0=v9[i0, i1, nl.arange(128)[:, None], 0], reverse0=False, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                for i4 in nl.affine_range(4):
                    # Transpose each 128x128 sub-tile via the identity on the PE array.
                    v12[i0, i1, i3, i4, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v11[i0, i1, i3, nl.arange(128)[:, None], 128 * i4 + nl.arange(128)[None, :]], v6[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v13[i3, i4, i0, i1, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.copy(v12[i0, i1, i3, i4, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                ' end loop i4 '
            ' end loop i3 '
        ' end loop i1 '
        for i5 in nl.affine_range(2):
            for i6 in nl.affine_range(2):
                for i7 in nl.affine_range(4):
                    v14[i0, i5, i6, i7, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i7 + 4 * i6, nl.arange(128)[:, None], 1024 * i5 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
                ' end loop i7 '
            ' end loop i6 '
            for i8 in nl.affine_range(8):
                for i9 in nl.affine_range(2):
                    for i10 in nl.affine_range(2):
                        for i11 in nl.affine_range(4):
                            # Accumulate normalized-input x weight partial products in PSUM.
                            v15[i5, i0, i8, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v13[i10, i11, i0, i8, nl.arange(128)[:, None], nl.arange(128)[None, :]], v14[i0, i5, i10, i11, nl.arange(128)[:, None], 512 * i9 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                        ' end loop i11 '
                    ' end loop i10 '
                    v16[i0, i8, i5, nl.arange(128)[:, None], 512 * i9 + nl.arange(512)[None, :]] = nl.copy(v15[i5, i0, i8, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
                ' end loop i9 '
                nl.store(v3[i8 + 8 * i0, nl.arange(128)[:, None], 1024 * i5 + nl.arange(1024)[None, :]], value=v16[i0, i8, i5, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
            ' end loop i8 '
        ' end loop i5 '
    ' end loop i0 '
    return v3
kernels/rope_single_freq_apply_B1_H64_N4096_D128_0.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import neuronxcc.nki as nki
2
+ import neuronxcc.nki.isa as nisa
3
+ import neuronxcc.nki.language as nl
4
+
5
@nki.jit
def kernel(x_in, cos, sin):
    """
    Applies rotary position embeddings.
    Expected layout:
        x_in: [d_head, S]
        cos, sin: [d_head // 2, S]

    This implementation uses first and second halves i.e.,

        result[:d_head/2] = embedding[:d_head/2] * cos - embedding[d_head/2:] * sin
        result[d_head/2:] = embedding[d_head/2:] * cos + embedding[:d_head/2] * sin

    Fix: the former ``sb_coeff`` / ``e_cos_sin`` / ``o_cos_sin`` SBUF
    tensors were allocated, their views bound to names, and then those
    names were immediately rebound to fresh ``nl.load`` /
    ``nisa.tensor_tensor`` results — so the buffers were never written
    and only consumed SBUF while their comments claimed to "pack" data.
    The dead allocations are removed; every computed value is identical.
    """
    d_head, S = x_in.shape
    half_d = d_head // 2
    assert d_head <= 128
    assert d_head % 2 == 0
    assert tuple(cos.shape) == (half_d, S)
    assert cos.shape == sin.shape

    # Tile along the S dimension; tiles must cover S exactly or the
    # per-tile loads/stores below would run past the tensor edge.
    tile_size_S = 512
    assert S % tile_size_S == 0

    x_out = nl.ndarray((d_head, S), dtype=x_in.dtype, buffer=nl.shared_hbm)
    # Indices for selecting upper, lower partitions.
    i_upper = nl.arange(half_d)[:, None]
    i_lower = i_upper + half_d

    i_dh = nl.arange(d_head)[:, None]
    i_S = nl.arange(tile_size_S)[None, :]
    for i_S_offset in range(0, S, tile_size_S):
        # Load one [d_head, tile_size_S] slab of the input tensor.
        x_in_sb = nl.ndarray((d_head, tile_size_S), dtype=x_in.dtype, buffer=nl.sbuf)
        x_in_sb[i_dh, i_S] = nl.load(x_in[i_dh, i_S + i_S_offset])

        # Rotation coefficients for this tile.
        sb_cos = nl.load(cos[i_upper, i_S + i_S_offset])
        sb_sin = nl.load(sin[i_upper, i_S + i_S_offset])

        x_out_sb = nl.ndarray((d_head, tile_size_S), dtype=x_in.dtype, buffer=nl.sbuf)

        # Upper ("even") and lower ("odd") halves of the head dimension.
        sb_e = x_in_sb[i_upper, i_S]
        # copy to make sure tensor_tensor has both inputs with the same base partition
        sb_o = nl.copy(x_in_sb[i_lower, i_S])

        e_cos = nisa.tensor_tensor(sb_e, sb_cos, nl.multiply)
        o_cos = nisa.tensor_tensor(sb_o, sb_cos, nl.multiply)
        e_sin = nisa.tensor_tensor(sb_e, sb_sin, nl.multiply)
        o_sin = nisa.tensor_tensor(sb_o, sb_sin, nl.multiply)

        x_out_sb[i_upper, i_S] = nisa.tensor_tensor(e_cos, o_sin, nl.subtract)  # even * cos - odd * sin
        x_out_sb[i_lower, i_S] = nisa.tensor_tensor(o_cos, e_sin, nl.add)  # odd * cos + even * sin

        # Store output tensor.
        nl.store(x_out[i_dh, i_S + i_S_offset], x_out_sb)
    return x_out
kernels/silu_M4096_N7168_0.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1):
    """Elementwise SiLU: out = x * sigmoid(x) = x / (1 + exp(-x)).

    v1: (128, 32, 7168) float32 input in HBM. The last axis is processed
    as 7 column blocks of 1024, each split into two 512-wide halves.
    Returns a (128, 32, 7168) float32 tensor in shared HBM.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    v2 = nl.ndarray((128, 32, 7168), dtype=np.float32, buffer=nl.shared_hbm)
    v3 = nl.ndarray((nl.par_dim(128), 1), dtype=np.float32, name='memset.172', buffer=nl.sbuf)
    v4 = nl.ndarray((32, 7, nl.par_dim(128), 1024), dtype=np.float32, name='x_local_152', buffer=nl.sbuf)
    v5 = nl.ndarray((32, 7, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v6 = nl.ndarray((32, 7, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v7 = nl.ndarray((32, 7, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v8 = nl.ndarray((32, 7, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    # Zero bias vector reused by the activation instruction below.
    v3[nl.arange(128)[:, None], 0] = nisa.memset(shape=(128, 1), value=np.dtype(np.uint16).type(0), dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
    for i0 in nl.affine_range(32):
        for i1 in nl.affine_range(7):
            v4[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v1[nl.arange(128)[:, None], i0, nl.arange(1024)[None, :] + 1024 * i1], dtype=np.float32, mask=None)
            for i2 in nl.affine_range(2):
                # exp(-x): the activation unit negates the input via scale=-1.0.
                v6[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.exp, data=v4[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], bias=v3[nl.arange(128)[:, None], 0], scale=-1.0, mask=None, dtype=np.float32)
                # 1 + exp(-x)
                v7[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v6[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.add, operand0=np.dtype(np.float32).type(1), reverse0=True, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                # sigmoid(x) = 1 / (1 + exp(-x))
                v8[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.reciprocal(data=v7[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
                # x * sigmoid(x)
                v5[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]] = nl.multiply(v4[i0, i1, nl.arange(128)[:, None], 512 * i2 + nl.arange(512)[None, :]], v8[i0, i1, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
            ' end loop i2 '
            nl.store(v2[nl.arange(128)[:, None], i0, nl.arange(1024)[None, :] + 1024 * i1], value=v5[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
        ' end loop i1 '
    ' end loop i0 '
    return v2
kernels/swiglu_M4096_N3072_K1024_0.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2, v3, v4):
    """SwiGLU MLP: down( silu(x @ w_gate) * (x @ w_up) ).

    Per the buffer names below: v1 is the input x (5-D tiled layout),
    v4 is w_gate, v2 is w_up, v3 is w_down. The SiLU(gate) * up
    intermediate is spilled to HBM (v20) between phases.
    Returns a (32, 128, 1024) float32 tensor in shared HBM.
    NOTE(review): the logical row/column mapping of the tiled layouts is
    inferred from index arithmetic — confirm against the reference file.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    v5 = nl.ndarray((32, 128, 1024), dtype=np.float32, buffer=nl.shared_hbm)
    v6 = nl.ndarray((nl.par_dim(128), 1), dtype=np.float32, name='memset.200', buffer=nl.sbuf)
    # 128x128 identity constant; used by nc_matmul to transpose x tiles.
    v7 = nl.shared_constant(np.identity(128, dtype=np.float32))
    v8 = nl.ndarray((nl.par_dim(128), 128), dtype=np.float32, name='identity_local_182', buffer=nl.sbuf)
    v9 = nl.ndarray((3, 8, nl.par_dim(128), 1024), dtype=np.float32, name='w_gate_local_109', buffer=nl.sbuf)
    v10 = nl.ndarray((3, 8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v11 = nl.zeros((3, 8, 4, 8, nl.par_dim(128), 128), dtype=np.float32, name='95.178', buffer=nl.psum, lazy_initialization=True)
    v12 = nl.ndarray((3, 8, nl.par_dim(128), 8, 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v13 = nl.zeros((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v14 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v15 = nl.ndarray((3, 8, nl.par_dim(128), 1024), dtype=np.float32, name='w_up_local_118', buffer=nl.sbuf)
    v16 = nl.ndarray((3, 8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v17 = nl.zeros((3, 8, 4, 8, nl.par_dim(128), 128), dtype=np.float32, name='95.193', buffer=nl.psum, lazy_initialization=True)
    v18 = nl.ndarray((3, 8, nl.par_dim(128), 8, 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v19 = nl.zeros((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    # HBM scratch: the silu(gate) * up intermediate is spilled here.
    v20 = nl.ndarray(shape=(3, 8, 8, 128, 512), dtype=np.float32, name='_spill_163', buffer=nl.hbm)
    v21 = nl.ndarray((3, 8, nl.par_dim(128), 1024), dtype=np.float32, name='w_down_local_126', buffer=nl.sbuf)
    v22 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='_reload_166', buffer=nl.sbuf)
    v23 = nl.zeros((8, 4, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v24 = nl.ndarray((8, 4, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v25 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v26 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v27 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v28 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    v29 = nl.ndarray((3, 8, 8, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.sbuf)
    # Zero bias for the activation instruction below.
    v6[nl.arange(128)[:, None], 0] = nisa.memset(shape=(128, 1), value=np.dtype(np.uint16).type(0), dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
    v8[nl.arange(128)[:, None], nl.arange(128)[None, :]] = nl.load(v7[nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
    # Phase 1: gate projection — transpose x tiles, multiply by w_gate (v4) into v14.
    for i0 in nl.affine_range(3):
        for i1 in nl.affine_range(8):
            v9[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v4[i1, nl.arange(128)[:, None], 1024 * i0 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
        ' end loop i1 '
        for i2 in nl.affine_range(8):
            for i3 in nl.affine_range(4):
                v10[i0, i2, i3, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[i2, i3, nl.arange(128)[:, None, None], nl.arange(8)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                for i4 in nl.affine_range(8):
                    v11[i0, i2, i3, i4, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v10[i0, i2, i3, nl.arange(128)[:, None], 128 * i4 + nl.arange(128)[None, :]], v8[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v12[i0, i2, nl.arange(128)[:, None], i4, 128 * i3 + nl.arange(128)[None, :]] = nl.copy(v11[i0, i2, i3, i4, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                ' end loop i4 '
            ' end loop i3 '
            for i5 in nl.affine_range(8):
                for i6 in nl.affine_range(8):
                    v13[i0, i2, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v9[i0, i6, nl.arange(128)[:, None], 128 * i5 + nl.arange(128)[None, :]], v12[i0, i2, nl.arange(128)[:, None], i6, nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                ' end loop i6 '
                v14[i0, i5, i2, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.copy(v13[i0, i2, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
            ' end loop i5 '
        ' end loop i2 '
    ' end loop i0 '
    # Phase 2: up projection with w_up (v2), fused with SiLU(gate) * up; spill to v20.
    for i7 in nl.affine_range(3):
        for i8 in nl.affine_range(8):
            v15[i7, i8, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[i8, nl.arange(128)[:, None], 1024 * i7 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
        ' end loop i8 '
        for i9 in nl.affine_range(8):
            for i10 in nl.affine_range(4):
                v16[i7, i9, i10, nl.arange(128)[:, None, None], 128 * nl.arange(8)[None, :, None] + nl.arange(128)[None, None, :]] = nl.load(v1[i9, i10, nl.arange(128)[:, None, None], nl.arange(8)[None, :, None], nl.arange(128)[None, None, :]], dtype=np.float32, mask=None)
                for i11 in nl.affine_range(8):
                    v17[i7, i9, i10, i11, nl.arange(128)[:, None], nl.arange(128)[None, :]] = nisa.nc_matmul(v16[i7, i9, i10, nl.arange(128)[:, None], 128 * i11 + nl.arange(128)[None, :]], v8[nl.arange(128)[:, None], nl.arange(128)[None, :]], is_stationary_onezero=False, is_moving_onezero=True, mask=None, is_transpose=True)
                    v18[i7, i9, nl.arange(128)[:, None], i11, 128 * i10 + nl.arange(128)[None, :]] = nl.copy(v17[i7, i9, i10, i11, nl.arange(128)[:, None], nl.arange(128)[None, :]], dtype=np.float32, mask=None)
                ' end loop i11 '
            ' end loop i10 '
            for i12 in nl.affine_range(8):
                for i13 in nl.affine_range(8):
                    v19[i7, i12, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v15[i7, i13, nl.arange(128)[:, None], 128 * i12 + nl.arange(128)[None, :]], v18[i7, i9, nl.arange(128)[:, None], i13, nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                ' end loop i13 '
                # SiLU on the gate: exp(-g), 1 + exp(-g), reciprocal, then g * sigmoid(g).
                v25[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.activation(op=nl.exp, data=v14[i7, i12, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], bias=v6[nl.arange(128)[:, None], 0], scale=-1.0, mask=None, dtype=np.float32)
                v26[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.tensor_scalar(data=v25[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], op0=nl.add, operand0=np.dtype(np.float32).type(1), reverse0=True, dtype=np.float32, mask=None, engine=nki.isa.unknown_engine)
                v27[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nisa.reciprocal(data=v26[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
                v28[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.multiply(v14[i7, i12, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], v27[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
                # silu(gate) * up, spilled to HBM scratch for phase 3.
                v29[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.multiply(v28[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], v19[i7, i12, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None, dtype=np.float32)
                nl.store(v20[i7, i12, i9, nl.arange(128)[:, None], nl.arange(512)[None, :]], value=v29[i7, i9, i12, nl.arange(128)[:, None], nl.arange(512)[None, :]], mask=None)
            ' end loop i12 '
        ' end loop i9 '
    ' end loop i7 '
    # Phase 3: reload the spill and multiply by w_down (v3) into the output.
    for i14 in nl.affine_range(3):
        for i15 in nl.affine_range(8):
            v21[i14, i15, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v3[i15 + 8 * i14, nl.arange(128)[:, None], nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
        ' end loop i15 '
    ' end loop i14 '
    for i16 in nl.affine_range(8):
        for i17 in nl.affine_range(3):
            for i18 in nl.affine_range(8):
                v22[i17, i18, i16, nl.arange(128)[:, None], nl.arange(512)[None, :]] = nl.load(v20[i17, i18, i16, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
            ' end loop i18 '
        ' end loop i17 '
        for i19 in nl.affine_range(4):
            for i20 in nl.affine_range(2):
                for i21 in nl.affine_range(3):
                    for i22 in nl.affine_range(8):
                        v23[i16, i19, i20, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v22[i21, i22, i16, nl.arange(128)[:, None], 128 * i19 + nl.arange(128)[None, :]], v21[i21, i22, nl.arange(128)[:, None], 512 * i20 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=None)
                    ' end loop i22 '
                ' end loop i21 '
                v24[i16, i19, nl.arange(128)[:, None], 512 * i20 + nl.arange(512)[None, :]] = nl.copy(v23[i16, i19, i20, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=None)
            ' end loop i20 '
            nl.store(v5[i19 + 4 * i16, nl.arange(128)[:, None], nl.arange(1024)[None, :]], value=v24[i16, i19, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=None)
        ' end loop i19 '
    ' end loop i16 '
    return v5
kernels/transpose_matmul_M4096_K2048_N10944_0.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import neuronxcc.nki as nki
3
+ import neuronxcc.nki.language as nl
4
+ import neuronxcc.nki.typing as nt
5
+ import neuronxcc.nki.isa as nisa
6
+ from neuronxcc.nki import trace
7
+ from neuronxcc.nki.language import par_dim
8
+
9
@nki.jit
def kernel(v1, v2):
    """Transposed matmul with masked ragged edge tiles.

    v1 (lhs) and v2 (rhs) are read in K-major tiled layouts. The output
    width 10944 is not a multiple of the 1024-column tile (11 tiles cover
    11264 columns), so loads, matmuls, copies, and stores past column
    10943 are suppressed with index masks.
    Returns a (32, 128, 10944) float32 tensor in shared HBM.
    NOTE(review): the logical M/K/N mapping is inferred from the file
    name and index arithmetic — confirm against the reference file.
    """
    import numpy as np
    import neuronxcc.nki as nki
    import neuronxcc.nki.language as nl
    import neuronxcc.nki.typing as nt
    import neuronxcc.nki.isa as nisa
    from neuronxcc.nki import trace
    from neuronxcc.nki.language import par_dim
    v3 = nl.ndarray((32, 128, 10944), dtype=np.float32, buffer=nl.shared_hbm)
    v4 = nl.ndarray((4, 16, nl.par_dim(128), 1024), dtype=np.float32, name='lhs_local_41', buffer=nl.sbuf)
    v5 = nl.ndarray((4, 11, 16, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    v6 = nl.zeros((4, 11, 8, 2, nl.par_dim(128), 512), dtype=np.float32, name='', buffer=nl.psum, lazy_initialization=True)
    v7 = nl.ndarray((4, 8, 11, nl.par_dim(128), 1024), dtype=np.float32, name='', buffer=nl.sbuf)
    for i0 in nl.affine_range(4):
        for i1 in nl.affine_range(16):
            v4[i0, i1, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v1[nl.arange(128)[:, None], i1, 1024 * i0 + nl.arange(1024)[None, :]], dtype=np.float32, mask=None)
        ' end loop i1 '
        for i2 in nl.affine_range(11):
            for i3 in nl.affine_range(16):
                # Mask clips the load at the ragged right edge (column 10943).
                v5[i0, i2, i3, nl.arange(128)[:, None], nl.arange(1024)[None, :]] = nl.load(v2[nl.arange(128)[:, None], i3, 1024 * i2 + nl.arange(1024)[None, :]], dtype=np.float32, mask=-1024 * i2 + -1 * nl.arange(1024)[None, :] + 10943 >= 0)
            ' end loop i3 '
            for i4 in nl.affine_range(8):
                for i5 in nl.affine_range(2):
                    for i6 in nl.affine_range(16):
                        # Accumulate lhs^T x rhs partial products over the 16 K-tiles in PSUM.
                        v6[i0, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]] += nisa.nc_matmul(v4[i0, i6, nl.arange(128)[:, None], nl.arange(128)[None, :] + 128 * i4], v5[i0, i2, i6, nl.arange(128)[:, None], 512 * i5 + nl.arange(512)[None, :]], is_stationary_onezero=False, is_moving_onezero=False, mask=-512 * i5 + -1024 * i2 + -1 * nl.arange(512)[None, :] + 10943 >= 0)
                    ' end loop i6 '
                    v7[i0, i4, i2, nl.arange(128)[:, None], 512 * i5 + nl.arange(512)[None, :]] = nl.copy(v6[i0, i2, i4, i5, nl.arange(128)[:, None], nl.arange(512)[None, :]], dtype=np.float32, mask=-512 * i5 + -1024 * i2 + -1 * nl.arange(512)[None, :] + 10943 >= 0)
                ' end loop i5 '
                nl.store(v3[8 * i0 + i4, nl.arange(128)[:, None], 1024 * i2 + nl.arange(1024)[None, :]], value=v7[i0, i4, i2, nl.arange(128)[:, None], nl.arange(1024)[None, :]], mask=-1024 * i2 + -1 * nl.arange(1024)[None, :] + 10943 >= 0)
            ' end loop i4 '
        ' end loop i2 '
    ' end loop i0 '
    return v3
reference/adamw_M10944_N2048_numpy_1.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
# Benchmark problem size: every AdamW state tensor is M x N.
M = 10944
N = 2048
6
+
7
def get_inputs():
    """Sample random AdamW state: params, grads, first moment, second moment.

    The second moment ``v`` is made non-negative, matching its role as a
    running average of squared gradients.
    """
    def _randn():
        return np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)

    theta, g, m = _randn(), _randn(), _randn()
    v = np.abs(np.random.normal(loc=0, scale=1.0, size=(M, N))).astype(np.float32)
    return [theta, g, m, v]
13
+
14
+
15
def forward(theta, g, m, v):
    """One AdamW-style update step; returns the new parameter tensor.

    Constants are fixed by the benchmark: beta1=0.9, beta2=0.999,
    lr=0.01, weight-decay factor 1e-5, eps=1e-8, and a bias-correction
    multiplier of 1000 on the second moment (step t = 1).
    """
    m_t = 0.9 * m + 0.1 * g
    v_t = 0.999 * v + 0.001 * g * g
    v_hat = v_t * 1000
    decayed = theta - 1e-5 * theta  # decoupled weight decay
    return decayed - 0.01 * m_t / (np.sqrt(v_hat) + 1e-8)
22
+
23
+
24
def transform_to_nki_inputs(inputs):
    """Reshape the four AdamW inputs to the (10944, 2048) kernel layout."""
    kernel_shape = (10944, 2048)
    return [
        np.reshape(inputs[0], kernel_shape),
        np.reshape(inputs[1], kernel_shape),
        np.reshape(inputs[2], kernel_shape),
        np.reshape(inputs[3], kernel_shape),
    ]
32
+
33
+
34
+
35
def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to match the corresponding reference output.

    Scalars/non-array entries in the reference pass the kernel value
    through unchanged.
    """
    if not isinstance(k_res, tuple):
        k_res = (k_res,)
    refs = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(out, expected.shape) if hasattr(expected, "shape") else out
        for out, expected in zip(k_res, refs)
    ]
reference/add_rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
# Benchmark problem sizes: x and z are (M, K), w is (K, N), g is (K,).
M = 4096
N = 2048
K = 1024
7
+
8
def get_inputs():
    """Build random inputs: residual pair (x, z), weight w, RMSNorm gain g, eps."""
    def _randn(*shape):
        return np.random.normal(loc=0, scale=1.0, size=shape).astype(np.float32)

    x = _randn(M, K)
    w = _randn(K, N)
    eps = 1e-5
    z = _randn(M, K)
    g = _randn(K)
    return [x, w, eps, z, g]
15
+
16
+
17
def forward(x, w, eps, z, g):
    """Residual add, RMSNorm over the last axis with gain g, then matmul with w."""
    resid = x + z
    sq = np.square(resid)
    sq = np.divide(sq, resid.shape[-1])
    mean_sq = np.sum(sq, axis=-1, keepdims=True)
    mean_sq = (mean_sq + eps).astype(resid.dtype)
    normed = resid / np.sqrt(mean_sq)
    normed = normed * g
    return np.matmul(normed, w)
26
+
27
+
28
+
29
def transform_to_nki_inputs(inputs):
    """The kernel consumes the reference inputs unchanged; no reshaping needed."""
    return inputs
31
+
32
+
33
+
34
def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to match the corresponding reference output.

    Scalars/non-array entries in the reference pass the kernel value
    through unchanged.
    """
    if not isinstance(k_res, tuple):
        k_res = (k_res,)
    refs = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(out, expected.shape) if hasattr(expected, "shape") else out
        for out, expected in zip(k_res, refs)
    ]
reference/bmm_B16_M4096_K64_N4096_numpy_1.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
# Benchmark problem sizes: lhs is (B, M, K), rhs is (B, K, N).
B = 16
K = 64
M = 4096
N = 4096
8
+
9
def get_inputs():
    """Random batched-matmul operands: lhs (B, M, K) and rhs (B, K, N)."""
    def _randn(*shape):
        return np.random.normal(loc=0, scale=1.0, size=shape).astype(np.float32)

    return [_randn(B, M, K), _randn(B, K, N)]
13
+
14
+
15
def forward(lhs, rhs):
    """Batched matrix multiply: (B, M, K) @ (B, K, N) -> (B, M, N)."""
    return lhs @ rhs
17
+
18
+
19
def transform_to_nki_inputs(inputs):
    """Reshape lhs/rhs into the tiled layouts the NKI kernel expects."""
    return [
        np.reshape(inputs[0], (16, 4096, 64)),
        np.reshape(inputs[1], (16, 64, 4096)),
    ]
25
+
26
+
27
+
28
def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to match the corresponding reference output.

    Scalars/non-array entries in the reference pass the kernel value
    through unchanged.
    """
    if not isinstance(k_res, tuple):
        k_res = (k_res,)
    refs = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(out, expected.shape) if hasattr(expected, "shape") else out
        for out, expected in zip(k_res, refs)
    ]
reference/bmm_softmax_B16_K64_M4096_N4096_numpy_1.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

B = 16
K = 64
M = 4096
N = 4096


def get_inputs():
    """Random float32 operands for a batched matmul followed by softmax."""
    shapes = [(B, M, K), (B, K, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(lhs, rhs):
    """Batched matmul, then a numerically stable softmax over the last axis."""
    scores = np.matmul(lhs, rhs)
    # Subtract the row max before exponentiating to avoid overflow.
    shifted = np.exp(scores - np.max(scores, axis=2, keepdims=True))
    return shifted / np.sum(shifted, axis=2, keepdims=True)


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(16, 4096, 64), (16, 64, 4096)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/gqa_full_B1_N4096_QH16_KH8_D128_numpy_2.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

B = 1
N = 4096
QH = 16
KH = 8
D = 128


def get_inputs():
    """Random float32 Q/K/V tensors in (batch, seq, heads, head_dim) layout."""
    shapes = [(B, N, QH, D), (B, N, KH, D), (B, N, KH, D)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(q, k, v):
    """Grouped-query attention: repeat KV heads, softmax(QK^T / sqrt(D)) @ V."""
    group_size = QH // KH
    # Broadcast each KV head across its query group, then move heads ahead
    # of the sequence axis so everything is (B, H, N, D).
    keys = np.repeat(k, group_size, axis=2).transpose(0, 2, 1, 3)
    values = np.repeat(v, group_size, axis=2).transpose(0, 2, 1, 3)
    queries = q.transpose(0, 2, 1, 3)

    scores = (queries @ keys.transpose(0, 1, 3, 2)) / np.float32(np.sqrt(D))
    # Numerically stable softmax over the key axis.
    shifted = np.exp(scores - np.max(scores, axis=-1, keepdims=True))
    weights = shifted / np.sum(shifted, axis=-1, keepdims=True)

    return weights @ values


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(32, 128, 16, 128), (1, 8, 4, 128, 8, 128), (1, 32, 128, 1024)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/lora_M4096_N12288_K5120_R128_numpy_1.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 12288
K = 5120
R = 128


def get_inputs():
    """Random float32 base weight plus rank-R LoRA factors."""
    shapes = [(M, K), (K, N), (K, R), (R, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(x, w, a, b):
    """Base projection plus low-rank (LoRA) correction: x @ w + (x @ a) @ b."""
    base = np.matmul(x, w)
    low_rank = np.matmul(np.matmul(x, a), b)
    return base + low_rank


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(8, 4, 128, 40, 128), (40, 128, 12288), (40, 128, 128), (128, 12288)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/mamba_M7168_C256_S16_numpy_1.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 7168
C = 256
S = 16


def get_inputs():
    """Random float32 selective-scan inputs: delta/u (C, M), a (C, S), b/c (S, M)."""
    shapes = [(C, M), (C, M), (C, S), (S, M), (S, M)]
    return [np.random.normal(loc=0, scale=0.05, size=s).astype(np.float32)
            for s in shapes]


def forward(delta, u, a, b, c):
    """Mamba-style selective scan.

    Generalized to take all sizes from the input shapes instead of the
    module-level C/S/M constants (backward compatible — the default inputs
    from ``get_inputs`` produce identical results).

    Args:
        delta, u: (channels, seq_len) float32 arrays.
        a: (channels, state_dim) float32 array.
        b, c: (state_dim, seq_len) float32 arrays.

    Returns:
        (channels, seq_len) float32 scan output.
    """
    n_channels, seq_len = delta.shape
    state_dim = a.shape[1]
    deltaA = np.exp(delta[:, None, :] * a[:, :, None])
    deltaB_u = delta[:, None, :] * b[None, :, :] * u[:, None, :]
    # np.empty instead of the low-level np.ndarray constructor (discouraged by
    # NumPy); every slot is written before it is read (i == 0 uses prev_state=0).
    scan_res = np.empty((n_channels, state_dim, seq_len), dtype=np.float32)
    for i in range(seq_len):
        prev_state = scan_res[..., i - 1] if i > 0 else 0
        scan_res[..., i] = deltaA[..., i] * prev_state + deltaB_u[..., i]
    # Contract the state dimension against c.
    return np.sum(c[None, :, :] * scan_res, axis=-2)


def transform_to_nki_inputs(inputs):
    """The kernel consumes the host inputs unchanged."""
    return inputs


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/matmul_M4096_N12288_K5120_numpy_2.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 12288
K = 5120


def get_inputs():
    """Random float32 operands for an (M, K) x (K, N) matmul."""
    shapes = [(M, K), (K, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(lhs, rhs):
    """Reference matrix multiplication."""
    return np.matmul(lhs, rhs)


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(32, 128, 40, 128), (40, 128, 12288)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/matmul_add_rmsnorm_M4096_N2048_K2048_numpy_1.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 2048
K = 2048


def get_inputs():
    """Random float32 inputs for matmul + residual add + gained RMSNorm."""
    x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
    w = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
    z = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
    g = np.random.normal(loc=0, scale=1.0, size=(N,)).astype(np.float32)
    return [x, w, 1e-5, z, g]


def forward(x, w, eps, z, g):
    """Matmul, residual add, then RMS normalization scaled by gain g."""
    pre_norm = np.matmul(x, w) + z
    rms = np.sqrt(np.mean(pre_norm ** 2, axis=-1, keepdims=True) + eps)
    return pre_norm * g / rms


def transform_to_nki_inputs(inputs):
    """The kernel consumes the host inputs unchanged."""
    return inputs


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 2048
K = 1024


def get_inputs():
    """Random float32 inputs for RMSNorm (no gain) followed by a matmul."""
    shapes = [(M, K), (K, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(input_tensor, weight_matrix):
    """RMS-normalize over axis 1 (denominator fixed to K), then matmul.

    Note: the mean-of-squares divides by the module constant K, so the
    normalization matches a true mean only when the input has K columns.
    """
    mean_sq = np.sum(np.divide(np.square(input_tensor), K), axis=1, keepdims=True)
    normalized = np.divide(input_tensor, np.sqrt(mean_sq))
    return np.matmul(normalized, weight_matrix)


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(32, 128, 1024), (8, 128, 2048)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/rope_single_freq_apply_B1_H64_N4096_D128_numpy_1.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np

B = 1
H = 64
N = 4096
D = 128


def get_inputs():
    """Random float32 activations and rotation tables, feature-major layout.

    x is (D, B*H*N); the cos/sin tables cover half the feature axis each.
    """
    x = np.random.normal(loc=0, scale=1.0, size=(D, B * H * N)).astype(np.float32)
    freqs_cos = np.random.normal(loc=0, scale=1.0, size=(D // 2, B * H * N)).astype(np.float32)
    freqs_sin = np.random.normal(loc=0, scale=1.0, size=(D // 2, B * H * N)).astype(np.float32)
    return [x, freqs_cos, freqs_sin]


def forward(x, freqs_cos, freqs_sin):
    """Apply a rotary embedding: 2-D rotation of the two halves of axis 0."""
    half_h = D // 2
    x0 = x[:half_h, :]
    x1 = x[half_h:, :]
    x_out_0 = x0 * freqs_cos - x1 * freqs_sin
    x_out_1 = x0 * freqs_sin + x1 * freqs_cos
    return np.concatenate([x_out_0, x_out_1], axis=0)


def transform_to_nki_inputs(inputs):
    """The kernel consumes the host inputs unchanged."""
    return inputs


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape.

    Fixed for consistency with the other reference tasks: previously this
    returned the raw kernel result wrapped in a tuple without reshaping.
    """
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/silu_M4096_N7168_numpy_0.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 7168


def get_inputs():
    """Random float32 input for element-wise SiLU."""
    return [np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)]


def forward(x):
    """Element-wise SiLU (sigmoid-weighted linear unit): x * sigmoid(x)."""
    return x / (1 + np.exp(-x))


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(128, 32, 7168)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/swiglu_M4096_N3072_K1024_numpy_2.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 3072
K = 1024


def get_inputs():
    """Random float32 input plus up/down/gate projection weights."""
    shapes = [(M, K), (K, N), (N, K), (K, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(x, w_up, w_down, w_gate):
    """SwiGLU MLP: down-project SiLU(x @ w_gate) * (x @ w_up)."""
    up = np.matmul(x, w_up)
    gate = np.matmul(x, w_gate)
    silu_gate = gate / (1 + np.exp(-gate))
    return np.matmul(silu_gate * up, w_down)


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(8, 4, 128, 8, 128), (8, 128, 3072), (24, 128, 1024), (8, 128, 3072)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
reference/transpose_matmul_M4096_K2048_N10944_numpy_1.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import numpy as np

M = 4096
N = 10944
K = 2048


def get_inputs():
    """Random float32 operands; lhs is stored pre-transposed as (K, M)."""
    shapes = [(K, M), (K, N)]
    return [np.random.normal(loc=0, scale=1.0, size=s).astype(np.float32)
            for s in shapes]


def forward(lhs, rhs):
    """Compute lhs^T @ rhs where lhs arrives transposed as (K, M)."""
    lhs_t = np.transpose(lhs, axes=(1, 0))
    return np.matmul(lhs_t, rhs)


def transform_to_nki_inputs(inputs):
    """Reshape host inputs into the layouts the NKI kernel expects."""
    layouts = [(128, 16, 4096), (128, 16, 10944)]
    return [np.reshape(arr, shape) for arr, shape in zip(inputs, layouts)]


def transform_nki_outputs(k_res, ref):
    """Reshape each kernel output to its reference output's shape."""
    kernel_vals = k_res if isinstance(k_res, tuple) else (k_res,)
    ref_vals = ref if isinstance(ref, tuple) else (ref,)
    return [
        np.reshape(val, target.shape) if hasattr(target, "shape") else val
        for val, target in zip(kernel_vals, ref_vals)
    ]
seeds/adamw.yaml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: adamw
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+
7
+ input: |
8
+ theta = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
9
+ g = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
10
+ m = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
11
+ v = np.abs(np.random.normal(loc=0, scale=1.0, size=(M, N))).astype(np.float32)
12
+ return [theta, g, m, v]
13
+
14
+ impl: |
15
+ def forward(theta, g, m, v):
16
+ theta_t = theta - 1e-5 * theta
17
+ m_t = 0.9 * m + 0.1 * g
18
+ v_t = 0.999 * v + 0.001 * g * g
19
+ v_hat = v_t * 1000
20
+ new_theta_t = theta_t - 0.01 * m_t / (np.sqrt(v_hat) + 1e-8)
21
+ return new_theta_t
22
+
23
+ comments: |
24
+ AdamW update at t=1, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01
seeds/add_rmsnorm_matmul.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: add_rmsnorm_matmul
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+
8
+ input: |
9
+ x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
10
+ w = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ eps = 1e-5
12
+ z = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
13
+ g = np.random.normal(loc=0, scale=1.0, size=(K,)).astype(np.float32)
14
+ return [x, w, eps, z, g]
15
+
16
+ impl: |
17
+ def forward(x, w, eps, z, g):
18
+ y = x + z
19
+ t = np.square(y)
20
+ t = np.divide(t, y.shape[-1])
21
+ t = np.sum(t, axis=-1, keepdims=True)
22
+ t = (t + eps).astype(y.dtype)
23
+ y = y / np.sqrt(t)
24
+ y = y * g
25
+ return np.matmul(y, w)
seeds/bmm.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: bmm
2
+
3
+ parameters:
4
+ - B
5
+ - M
6
+ - K
7
+ - N
8
+
9
+ input: |
10
+ lhs = np.random.normal(loc=0, scale=1.0, size=(B, M, K)).astype(np.float32)
11
+ rhs = np.random.normal(loc=0, scale=1.0, size=(B, K, N)).astype(np.float32)
12
+ return [lhs, rhs]
13
+
14
+ impl: |
15
+ def forward(lhs, rhs):
16
+ return np.matmul(lhs, rhs)
seeds/bmm_softmax.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: bmm_softmax
2
+
3
+ parameters:
4
+ - B
5
+ - K
6
+ - M
7
+ - N
8
+
9
+ input: |
10
+ lhs = np.random.normal(loc=0, scale=1.0, size=(B, M, K)).astype(np.float32)
11
+ rhs = np.random.normal(loc=0, scale=1.0, size=(B, K, N)).astype(np.float32)
12
+ return [lhs, rhs]
13
+
14
+ impl: |
15
+ def forward(lhs, rhs):
16
+ x = np.matmul(lhs, rhs)
17
+ max_x = np.max(x, axis=2, keepdims=True)
18
+ exp_x = np.exp(x - max_x)
19
+ sum_exp = np.sum(exp_x, axis=2, keepdims=True)
20
+ return exp_x / sum_exp
seeds/gqa_full.yaml ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: gqa_full
2
+
3
+ parameters:
4
+ - B
5
+ - N
6
+ - QH
7
+ - KH
8
+ - D
9
+
10
+ input: |
11
+ q = np.random.normal(loc=0, scale=1.0, size=(B, N, QH, D)).astype(np.float32)
12
+ k = np.random.normal(loc=0, scale=1.0, size=(B, N, KH, D)).astype(np.float32)
13
+ v = np.random.normal(loc=0, scale=1.0, size=(B, N, KH, D)).astype(np.float32)
14
+ return [q, k, v]
15
+
16
+ impl: |
17
+ def forward(q, k, v):
18
+ n_rep = QH // KH
19
+ xk = np.repeat(k, n_rep, axis=2)
20
+ xv = np.repeat(v, n_rep, axis=2)
21
+ xq = q.transpose(0, 2, 1, 3)
22
+ xk = xk.transpose(0, 2, 1, 3)
23
+ xv = xv.transpose(0, 2, 1, 3)
24
+
25
+ attention = (xq @ xk.transpose(0, 1, 3, 2)) / np.float32(np.sqrt(D))
26
+ exp_attention = np.exp(attention - np.max(attention, axis=-1, keepdims=True))
27
+ attention = exp_attention / np.sum(exp_attention, axis=-1, keepdims=True)
28
+
29
+ output = attention @ xv
30
+ return output
31
+
32
+
seeds/lora.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: lora
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+ - R
8
+
9
+ input: |
10
+ x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
11
+ w = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
12
+ a = np.random.normal(loc=0, scale=1.0, size=(K, R)).astype(np.float32)
13
+ b = np.random.normal(loc=0, scale=1.0, size=(R, N)).astype(np.float32)
14
+ return [x, w, a, b]
15
+
16
+ impl: |
17
+ def forward(x, w, a, b):
18
+ y1 = np.matmul(x, w)
19
+ y2 = np.matmul(np.matmul(x, a), b)
20
+ return y1 + y2
seeds/mamba.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: mamba
2
+
3
+ parameters:
4
+ - M
5
+ - C
6
+ - S
7
+
8
+ input: |
9
+ delta = np.random.normal(loc=0, scale=0.05, size=(C, M)).astype(np.float32)
10
+ u = np.random.normal(loc=0, scale=0.05, size=(C, M)).astype(np.float32)
11
+ a = np.random.normal(loc=0, scale=0.05, size=(C, S)).astype(np.float32)
12
+ b = np.random.normal(loc=0, scale=0.05, size=(S, M)).astype(np.float32)
13
+ c = np.random.normal(loc=0, scale=0.05, size=(S, M)).astype(np.float32)
14
+ return [delta, u, a, b, c]
15
+
16
+ impl: |
17
+ def forward(delta, u, a, b, c):
18
+ deltaA = np.exp(delta[:, None, :] * a[:, :, None])
19
+ deltaB_u = delta[:, None, :] * b[None, :, :] * u[:, None, :]
20
+ scan_res = np.ndarray((C, S, M), dtype=np.float32)
21
+ for i in range(M):
22
+ prev_state = scan_res[..., i - 1] if i > 0 else 0
23
+ scan_res[..., i] = deltaA[..., i] * prev_state + deltaB_u[..., i]
24
+ out = np.sum(c[None, :, :] * scan_res, axis=-2)
25
+ return out
seeds/matmul.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: matmul
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+
8
+ input: |
9
+ lhs = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
10
+ rhs = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ return [lhs, rhs]
12
+
13
+ impl: |
14
+ def forward(lhs, rhs):
15
+ return np.matmul(lhs, rhs)
seeds/matmul_add_rmsnorm.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: matmul_add_rmsnorm
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+
8
+ input: |
9
+ x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
10
+ w = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ eps = 1e-5
12
+ z = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
13
+ g = np.random.normal(loc=0, scale=1.0, size=(N,)).astype(np.float32)
14
+ return [x, w, eps, z, g]
15
+
16
+ impl: |
17
+ def forward(x, w, eps, z, g):
18
+ y = np.matmul(x, w) + z
19
+ rms = np.sqrt(np.mean(y ** 2, axis=-1, keepdims=True) + eps)
20
+ return y * g / rms
seeds/rmsnorm_matmul.yaml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: rmsnorm_matmul
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+
8
+ input: |
9
+ x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
10
+ w = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ return [x, w]
12
+
13
+ impl: |
14
+ def forward(input_tensor, weight_matrix):
15
+ # RMSNorm calculations
16
+ squared_input = np.square(input_tensor)
17
+ scaled_square = np.divide(squared_input, K)
18
+
19
+ rms_sum = np.sum(scaled_square, axis=1, keepdims=True)
20
+ rms_norm = np.sqrt(rms_sum)
21
+
22
+ normalized = np.divide(input_tensor, rms_norm)
23
+
24
+ # Matrix multiplication
25
+ matmul_result = np.matmul(normalized, weight_matrix)
26
+
27
+ return matmul_result
28
+
29
+ comments: |
30
+ This is a simple RMSNorm (without learnable parameters) + Matmul kernel. NeuronPy compiler can lower this.
seeds/rope_single_freq_apply.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: rope_single_freq_apply
2
+
3
+ parameters:
4
+ - B
5
+ - H
6
+ - N
7
+ - D
8
+
9
+ input: |
10
+ x = np.random.normal(loc=0, scale=1.0, size=(D, B*H*N)).astype(np.float32)
11
+ freqs_cos = np.random.normal(loc=0, scale=1.0, size=(D // 2, B*H*N)).astype(np.float32)
12
+ freqs_sin = np.random.normal(loc=0, scale=1.0, size=(D // 2, B*H*N)).astype(np.float32)
13
+ return [x, freqs_cos, freqs_sin]
14
+
15
+ impl: |
16
+ def forward(x, freqs_cos, freqs_sin):
17
+ half_h = D // 2
18
+ x0 = x[:half_h, :]
19
+ x1 = x[half_h:, :]
20
+ x_out_0 = x0 * freqs_cos - x1 * freqs_sin
21
+ x_out_1 = x0 * freqs_sin + x1 * freqs_cos
22
+ x_out = np.concatenate([x_out_0, x_out_1], axis=0)
23
+ return x_out
seeds/silu.yaml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: silu
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+
7
+ input: |
8
+ x = np.random.normal(loc=0, scale=1.0, size=(M, N)).astype(np.float32)
9
+ return [x]
10
+
11
+ impl: |
12
+ def forward(x):
13
+ return x / (1 + np.exp(-x))
seeds/swiglu.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: swiglu
2
+
3
+ parameters:
4
+ - M
5
+ - N
6
+ - K
7
+
8
+ input: |
9
+ x = np.random.normal(loc=0, scale=1.0, size=(M, K)).astype(np.float32)
10
+ w_up = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ w_down = np.random.normal(loc=0, scale=1.0, size=(N, K)).astype(np.float32)
12
+ w_gate = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
13
+ return [x, w_up, w_down, w_gate]
14
+
15
+ impl: |
16
+ def forward(x, w_up, w_down, w_gate):
17
+ up_feature = np.matmul(x, w_up)
18
+ gate_feature = np.matmul(x, w_gate)
19
+ activated_gate_feature = gate_feature / (1 + np.exp(-gate_feature))
20
+ return np.matmul(activated_gate_feature * up_feature, w_down)
seeds/transpose_matmul.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ test_name: transpose_matmul
2
+
3
+ parameters:
4
+ - M
5
+ - K
6
+ - N
7
+
8
+ input: |
9
+ lhs_t = np.random.normal(loc=0, scale=1.0, size=(K, M)).astype(np.float32)
10
+ rhs = np.random.normal(loc=0, scale=1.0, size=(K, N)).astype(np.float32)
11
+ return [lhs_t, rhs]
12
+
13
+ impl: |
14
+ def forward(lhs_t, rhs):
15
+ return np.matmul(np.transpose(lhs_t, axes=(1, 0)), rhs)
summary.json ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "swiglu": {
3
+ "seed": "./seeds/swiglu.yaml",
4
+ "cases": {
5
+ "2": {
6
+ "values": {
7
+ "K": 1024,
8
+ "M": 4096,
9
+ "N": 3072
10
+ },
11
+ "impls": [
12
+ {
13
+ "task": "./reference/swiglu_M4096_N3072_K1024_numpy_2.py",
14
+ "kernel": "./kernels/swiglu_M4096_N3072_K1024_0.py"
15
+ }
16
+ ]
17
+ }
18
+ }
19
+ },
20
+ "matmul_add_rmsnorm": {
21
+ "seed": "./seeds/matmul_add_rmsnorm.yaml",
22
+ "cases": {
23
+ "1": {
24
+ "values": {
25
+ "K": 2048,
26
+ "M": 4096,
27
+ "N": 2048
28
+ },
29
+ "impls": [
30
+ {
31
+ "task": "./reference/matmul_add_rmsnorm_M4096_N2048_K2048_numpy_1.py",
32
+ "kernel": "./kernels/matmul_add_rmsnorm_M4096_N2048_K2048_0.py"
33
+ }
34
+ ]
35
+ }
36
+ }
37
+ },
38
+ "add_rmsnorm_matmul": {
39
+ "seed": "./seeds/add_rmsnorm_matmul.yaml",
40
+ "cases": {
41
+ "2": {
42
+ "values": {
43
+ "K": 1024,
44
+ "M": 4096,
45
+ "N": 2048
46
+ },
47
+ "impls": [
48
+ {
49
+ "task": "./reference/add_rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py",
50
+ "kernel": "./kernels/add_rmsnorm_matmul_M4096_N2048_K1024_0.py"
51
+ }
52
+ ]
53
+ }
54
+ }
55
+ },
56
+ "matmul": {
57
+ "seed": "./seeds/matmul.yaml",
58
+ "cases": {
59
+ "3": {
60
+ "values": {
61
+ "K": 5120,
62
+ "M": 4096,
63
+ "N": 12288
64
+ },
65
+ "impls": [
66
+ {
67
+ "task": "./reference/matmul_M4096_N12288_K5120_numpy_2.py",
68
+ "kernel": "./kernels/matmul_M4096_N12288_K5120_0.py"
69
+ }
70
+ ]
71
+ }
72
+ }
73
+ },
74
+ "gqa_full": {
75
+ "seed": "./seeds/gqa_full.yaml",
76
+ "cases": {
77
+ "0": {
78
+ "values": {
79
+ "B": 1,
80
+ "D": 128,
81
+ "KH": 8,
82
+ "N": 4096,
83
+ "QH": 16
84
+ },
85
+ "impls": [
86
+ {
87
+ "task": "./reference/gqa_full_B1_N4096_QH16_KH8_D128_numpy_2.py",
88
+ "kernel": "./kernels/gqa_full_B1_N4096_QH16_KH8_D128_0.py"
89
+ }
90
+ ]
91
+ }
92
+ }
93
+ },
94
+ "rmsnorm_matmul": {
95
+ "seed": "./seeds/rmsnorm_matmul.yaml",
96
+ "cases": {
97
+ "2": {
98
+ "values": {
99
+ "K": 1024,
100
+ "M": 4096,
101
+ "N": 2048
102
+ },
103
+ "impls": [
104
+ {
105
+ "task": "./reference/rmsnorm_matmul_M4096_N2048_K1024_numpy_1.py",
106
+ "kernel": "./kernels/rmsnorm_matmul_M4096_N2048_K1024_0.py"
107
+ }
108
+ ]
109
+ }
110
+ }
111
+ },
112
+ "rope_single_freq_apply": {
113
+ "seed": "./seeds/rope_single_freq_apply.yaml",
114
+ "cases": {
115
+ "1": {
116
+ "values": {
117
+ "B": 1,
118
+ "H": 64,
119
+ "N": 4096,
120
+ "D": 128
121
+ },
122
+ "impls": [
123
+ {
124
+ "task": "./reference/rope_single_freq_apply_B1_H64_N4096_D128_numpy_1.py",
125
+ "kernel": "./kernels/rope_single_freq_apply_B1_H64_N4096_D128_0.py"
126
+ }
127
+ ]
128
+ }
129
+ }
130
+ },
131
+ "bmm": {
132
+ "seed": "./seeds/bmm.yaml",
133
+ "cases": {
134
+ "2": {
135
+ "values": {
136
+ "B": 16,
137
+ "K": 64,
138
+ "M": 4096,
139
+ "N": 4096
140
+ },
141
+ "impls": [
142
+ {
143
+ "task": "./reference/bmm_B16_M4096_K64_N4096_numpy_1.py",
144
+ "kernel": "./kernels/bmm_B16_M4096_K64_N4096_0.py"
145
+ }
146
+ ]
147
+ }
148
+ }
149
+ },
150
+ "bmm_softmax": {
151
+ "seed": "./seeds/bmm_softmax.yaml",
152
+ "cases": {
153
+ "2": {
154
+ "values": {
155
+ "B": 16,
156
+ "K": 64,
157
+ "M": 4096,
158
+ "N": 4096
159
+ },
160
+ "impls": [
161
+ {
162
+ "task": "./reference/bmm_softmax_B16_K64_M4096_N4096_numpy_1.py",
163
+ "kernel": "./kernels/bmm_softmax_B16_K64_M4096_N4096_0.py"
164
+ }
165
+ ]
166
+ }
167
+ }
168
+ },
169
+ "transpose_matmul": {
170
+ "seed": "./seeds/transpose_matmul.yaml",
171
+ "cases": {
172
+ "2": {
173
+ "values": {
174
+ "K": 2048,
175
+ "M": 4096,
176
+ "N": 10944
177
+ },
178
+ "impls": [
179
+ {
180
+ "task": "./reference/transpose_matmul_M4096_K2048_N10944_numpy_1.py",
181
+ "kernel": "./kernels/transpose_matmul_M4096_K2048_N10944_0.py"
182
+ }
183
+ ]
184
+ }
185
+ }
186
+ },
187
+ "lora": {
188
+ "seed": "./seeds/lora.yaml",
189
+ "cases": {
190
+ "2": {
191
+ "values": {
192
+ "K": 5120,
193
+ "M": 4096,
194
+ "N": 12288,
195
+ "R": 128
196
+ },
197
+ "impls": [
198
+ {
199
+ "task": "./reference/lora_M4096_N12288_K5120_R128_numpy_1.py",
200
+ "kernel": "./kernels/lora_M4096_N12288_K5120_R128_0.py"
201
+ }
202
+ ]
203
+ }
204
+ }
205
+ },
206
+ "adamw": {
207
+ "seed": "./seeds/adamw.yaml",
208
+ "cases": {
209
+ "2": {
210
+ "values": {
211
+ "M": 10944,
212
+ "N": 2048
213
+ },
214
+ "impls": [
215
+ {
216
+ "task": "./reference/adamw_M10944_N2048_numpy_1.py",
217
+ "kernel": "./kernels/adamw_M10944_N2048_0.py"
218
+ }
219
+ ]
220
+ }
221
+ }
222
+ },
223
+ "silu": {
224
+ "seed": "./seeds/silu.yaml",
225
+ "cases": {
226
+ "2": {
227
+ "values": {
228
+ "M": 4096,
229
+ "N": 7168
230
+ },
231
+ "impls": [
232
+ {
233
+ "task": "./reference/silu_M4096_N7168_numpy_0.py",
234
+ "kernel": "./kernels/silu_M4096_N7168_0.py"
235
+ }
236
+ ]
237
+ }
238
+ }
239
+ },
240
+ "mamba": {
241
+ "seed": "./seeds/mamba.yaml",
242
+ "cases": {
243
+ "2": {
244
+ "values": {
245
+ "C": 256,
246
+ "M": 7168,
247
+ "S": 16
248
+ },
249
+ "impls": [
250
+ {
251
+ "task": "./reference/mamba_M7168_C256_S16_numpy_1.py",
252
+ "kernel": "./kernels/mamba_M7168_C256_S16_0.py"
253
+ }
254
+ ]
255
+ }
256
+ }
257
+ }
258
+ }