# NKIBench / kernel_wrapper.py
import json
import os
import re
import sys
import tempfile
import time
import traceback
import uuid
from pathlib import Path

import numpy as np
from pydantic import BaseModel, Field
class KernelProperties(BaseModel):
"""
Single Kernel Execution
"""
compiled: bool = False
correct: bool = False
runnable: bool = False
metadata: dict = Field(default_factory=dict)
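# Example of a populated result after profiling (values illustrative; the
# metadata keys match those written by NKIKernel.profile below):
#   KernelProperties(compiled=True, runnable=True, correct=True,
#                    metadata={"latency": 0.42, "rel_diffs": 0.003})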
# Function to load module from file path
def load_module_from_path(file_path):
    # Add the file's directory to sys.path so the loaded module can import
    # files that sit next to it
    parent_dir = str(Path(file_path).parent)
    if parent_dir not in sys.path:
        sys.path.append(parent_dir)
import importlib.util
spec = importlib.util.spec_from_file_location("module", file_path)
if spec is None or spec.loader is None:
raise ImportError(f"Could not load module from {file_path}")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
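# A sketch of the task-module contract that NKIKernel.profile (below) relies
# on when loading `base_numpy_path`; the function names come from how the
# module is used in this file, while the signatures are our reading of that
# usage, not a spec from the source:
#
#     def get_inputs():                 # -> tuple of NumPy input arrays
#     def forward(*np_inputs):          # NumPy reference implementation
#     def transform_to_nki_inputs(np_inputs):        # repack inputs for the kernel
#     def transform_nki_outputs(nki_out, ref_out):   # repack kernel outputs as a list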
def l2norm_allclose(v_k, v_r, rel_tol=1e-5):
    """Return True iff ||v_k - v_r||_2 < rel_tol * ||v_r||_2."""
    return np.linalg.norm((v_k - v_r).astype(np.float64)) < rel_tol * np.linalg.norm(v_r.astype(np.float64))
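# Worked example of the criterion (values chosen for illustration): for
# v_r = [3.0, 4.0], ||v_r||_2 = 5.0, so at rel_tol=1e-5 the kernel output may
# deviate from the reference by at most 5e-5 in L2 norm.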
def check_correctness_numpy(output_nki, output_task, res, rel_tol=2e-5):
# output_nki is a list
# output_task is a tuple or a single array
if not isinstance(output_task, tuple):
output_task_tuple = (output_task,)
else:
output_task_tuple = output_task
is_correct = True
if len(output_nki) != len(output_task_tuple):
res.metadata.setdefault("correctness_error", []).append(
f"Num outputs mismatch: nki={len(output_nki)} vs ref={len(output_task_tuple)}"
)
res.correct = False
return
for i, (v_k, v_r) in enumerate(zip(output_nki, output_task_tuple)):
if hasattr(v_r, "shape") and hasattr(v_k, "shape"):
if v_k.shape != v_r.shape:
res.metadata.setdefault("correctness_error", []).append(f"Output {i} shape mismatch, expected {v_r.shape}, got {v_k.shape}; ")
is_correct = False
if not l2norm_allclose(v_k, v_r, rel_tol=rel_tol):
max_diff = np.amax(np.abs(v_k - v_r))
avg_diff = np.mean(np.abs(v_k - v_r))
max_rel_diff = np.amax(np.abs(v_k - v_r) / np.abs(v_r))
l2norm_diff = np.linalg.norm((v_k - v_r).astype(np.float64))
l2norm_ref = np.linalg.norm(v_r.astype(np.float64))
l2norm_rel_diff = l2norm_diff / l2norm_ref
res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, max diff {max_diff:.6f}, avg diff {avg_diff:.6f}, max rel diff {max_rel_diff:.6f}, l2norm diff {l2norm_diff:.6f}, l2norm ref {l2norm_ref:.6f}, l2norm rel diff {l2norm_rel_diff:.6f}")
is_correct = False
        else:
            # Scalar outputs: compare floats with the relative-L2 criterion,
            # all other types exactly
if np.issubdtype(type(v_r), np.floating) or np.issubdtype(type(v_k), np.floating):
if not l2norm_allclose(v_k, v_r, rel_tol=rel_tol):
res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, expected {v_r}, got {v_k};")
is_correct = False
else:
if v_k != v_r:
res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, expected {v_r}, got {v_k}; ")
is_correct = False
res.correct = is_correct
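# Usage sketch (arrays are illustrative):
#   res = KernelProperties()
#   check_correctness_numpy([np.ones(4)], np.ones(4), res)
#   assert res.correct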
def check_precision_and_correctness(program_path, output_nki, output_task, res, rel_tol):
with open(program_path, 'r') as f:
program_code = f.read()
    # Strip comments before scanning the kernel source
    program_code = re.sub(r'#.*', '', program_code)
    # Reject half-precision kernels: the substring "float16" also matches "bfloat16"
    if "float16" in program_code:
        res.metadata["correctness_error"] = "float16 or bfloat16 is used in the program."
res.correct = False
return
check_correctness_numpy(output_nki, output_task, res, rel_tol=rel_tol)
# NKI runtime from the AWS Neuron SDK (neuronxcc package)
import neuronxcc.nki as nki
def get_latency(nki_kernel_fn, nki_inputs, artifact_dir):
kernel_id = uuid.uuid4()
neff_path = os.path.join(artifact_dir, f"neff_{kernel_id}.neff")
ntff_path = os.path.join(artifact_dir, f"ntff_{kernel_id}.ntff")
nki.baremetal(
nki_kernel_fn,
save_neff_name=neff_path,
save_trace_name=ntff_path,
additional_compile_opt="--disable-dge --logical-nc-config=1"
)(*nki_inputs)
summary_profile_path = os.path.join(artifact_dir, f"profile_{kernel_id}.json")
summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
os.system(summary_profile_cmd)
    with open(summary_profile_path, 'r') as f:
        summary = json.load(f)
    latency_ms = summary[next(iter(summary))]["total_time"] * 1e3  # seconds -> milliseconds
return latency_ms
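# The summary JSON is assumed to map a single profile key to a dict of
# metrics, e.g. {"<profile_id>": {"total_time": 4.2e-4, ...}}, with
# "total_time" in seconds (hence the 1e3 factor above); `profile` below reads
# its `save_fields` out of the same structure.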
def benchmark_latency(warmup_iterations, benchmark_iterations, nki_kernel_fn, nki_inputs, artifact_dir):
    for _ in range(warmup_iterations):
nki.baremetal(
nki_kernel_fn,
additional_compile_opt="--disable-dge --logical-nc-config=1"
)(*nki_inputs)
latency_ms_list = []
for _ in range(benchmark_iterations):
latency_ms = get_latency(nki_kernel_fn, nki_inputs, artifact_dir)
latency_ms_list.append(latency_ms)
runtime_stats = {
"mean_ms": np.mean(latency_ms_list),
"min_ms": np.min(latency_ms_list),
"max_ms": np.max(latency_ms_list),
"rel_diffs": (np.max(latency_ms_list) - np.min(latency_ms_list)) / np.min(latency_ms_list)
}
return runtime_stats
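# Example return value (numbers illustrative):
#   {"mean_ms": 0.42, "min_ms": 0.40, "max_ms": 0.45, "rel_diffs": 0.125}
# "rel_diffs" is the max-to-min latency spread relative to the min; profile()
# uses it as a stability signal when deciding whether to retry.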
class NKIKernel:
def __init__(self, program_path: str, base_numpy_path: str):
self.program_path = program_path
self.base_numpy_path = base_numpy_path
self.res = KernelProperties()
        self.rel_tol = 2e-5   # relative L2-norm tolerance for correctness checks
        self.perf_tol = 0.01  # max tolerated min-to-max latency spread before retrying
    def profile(self, save_fields: list[str] | None = None):
        save_fields = save_fields or []
os.environ["NEURON_CC_FLAGS"] = "--auto-cast=none"
os.environ['NEURON_RT_NUM_CORES']= '1'
np.random.seed(42)
task_module = load_module_from_path(self.base_numpy_path)
task_fn = task_module.forward
task_np_input_fn = task_module.get_inputs
task_np_inputs = task_np_input_fn()
task_nki_output_fn = task_module.transform_nki_outputs
self.res = KernelProperties()
new_profile_name = f"nki_{uuid.uuid4()}"
with tempfile.TemporaryDirectory(dir="/tmp", prefix=f"{new_profile_name}_") as artifact_dir:
neff_path = os.path.join(artifact_dir, f"kernel_file.neff")
ntff_path = os.path.join(artifact_dir, f"kernel_profile.ntff")
try:
nki_kernel_module = load_module_from_path(self.program_path)
if hasattr(nki_kernel_module, "kernel"):
nki_kernel_fn = nki_kernel_module.kernel
elif hasattr(nki_kernel_module, "optimized_kernel"):
nki_kernel_fn = nki_kernel_module.optimized_kernel
else:
raise ValueError(f"No kernel function found in {self.program_path}")
# Get the transform_to_nki_inputs function
if hasattr(task_module, "transform_to_nki_inputs"):
task_nki_input_fn = task_module.transform_to_nki_inputs
else:
raise ValueError(f"No transform_to_nki_inputs function found in {self.program_path} or {self.base_numpy_path}")
nki_inputs = task_nki_input_fn(task_np_inputs)
output_nki = nki.baremetal(
nki_kernel_fn,
save_neff_name=neff_path,
save_trace_name=ntff_path,
additional_compile_opt="--disable-dge --logical-nc-config=1"
)(*nki_inputs)
self.res.compiled = True
self.res.runnable = True
except Exception as e:
print(f"Compilation failure. Error: {e}")
self.res.metadata["compilation_error"] = str(e)
self.res.metadata["compilation_traceback"] = traceback.format_exc()
return self.res
try:
for rnd_seed in [0, 21, 42, 63, 84]:
np.random.seed(rnd_seed)
task_np_inputs = task_np_input_fn()
nki_inputs = task_nki_input_fn(task_np_inputs)
output_task = task_fn(*task_np_inputs)
output_nki_raw = nki.baremetal(
nki_kernel_fn,
additional_compile_opt="--disable-dge --logical-nc-config=1"
)(*nki_inputs)
output_nki = task_nki_output_fn(output_nki_raw, output_task)
check_precision_and_correctness(self.program_path, output_nki, output_task, self.res, self.rel_tol)
if not self.res.correct:
break
except Exception as e:
print(f"Correct checking failure. Error: {e}")
self.res.metadata["correctness_error"] = str(e)
return self.res
if not self.res.correct:
return self.res
try:
                runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                rel_diff = runtime_stats["rel_diffs"]
                rel_diff_list = [rel_diff]
                runtime_stats_list = [runtime_stats]
                while rel_diff > self.perf_tol:
                    print(f"Retry: {self.program_path} at {len(rel_diff_list)}; rel_diffs: {rel_diff_list}")
                    time.sleep(1)
                    # Re-benchmark so each retry produces a fresh measurement
                    runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                    rel_diff = runtime_stats["rel_diffs"]
                    rel_diff_list.append(rel_diff)
                    runtime_stats_list.append(runtime_stats)
                    if len(rel_diff_list) > 2:  # Retry at most twice here; in the paper, we did 10 times.
                        break
                # Keep the most stable measurement across retries
                runtime_stats = runtime_stats_list[np.argmin(rel_diff_list)]
self.res.metadata["latency"] = runtime_stats["mean_ms"]
self.res.metadata["min_ms"] = runtime_stats["min_ms"]
self.res.metadata["max_ms"] = runtime_stats["max_ms"]
self.res.metadata["rel_diffs"] = runtime_stats["rel_diffs"]
summary_profile_path = os.path.join(artifact_dir, f"{new_profile_name}_summary_profile.json")
summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
os.system(summary_profile_cmd)
                with open(summary_profile_path, 'r') as f:
                    summary = json.load(f)
profile_result = summary[next(iter(summary))]
for field in save_fields:
                    if field in profile_result:
self.res.metadata[field] = profile_result[field]
except Exception as e:
print(f"Benchmarking failure. Error: {e}")
self.res.metadata["benchmarking_error"] = traceback.format_exc()
return self.res
return self.res
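# Minimal usage sketch (hypothetical paths; assumes a Neuron device and the
# neuron-profile CLI on PATH):
#
#   kernel = NKIKernel("kernels/my_kernel.py", "tasks/my_task_numpy.py")
#   result = kernel.profile(save_fields=["total_time"])
#   print(result.correct, result.metadata.get("latency"))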