import os
import sys
import time
import argparse
import numpy as np
import torch

# allow importing project modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from src.utils.helper import get_dataloader, check_device


def parse_args():
    """Build and parse the command-line options for the benchmark CLI.

    Returns:
        argparse.Namespace with artifact location/type, dataset settings,
        device choice, warmup/timed iteration counts, and evaluation flags.
    """
    parser = argparse.ArgumentParser(
        description="Benchmark inference for exported DeepPA artifacts"
    )
    parser.add_argument(
        "--artifact_path", type=str, required=True,
        help="Path to exported model artifact (.pt or .onnx)",
    )
    parser.add_argument(
        "--artifact_type", type=str, choices=["ts", "onnx"], required=True,
        help="Artifact type: TorchScript (ts) or ONNX (onnx)",
    )
    parser.add_argument(
        "--dataset", type=str, default="SINPA",
        help="Dataset name for loading inputs",
    )
    parser.add_argument(
        "--datapath", type=str, default="./data/SINPA",
        help="Dataset path containing train/val/test npz",
    )
    parser.add_argument(
        "--batch_size", type=int, default=64,
        help="Batch size to draw for benchmarking",
    )
    parser.add_argument(
        "--output_dim", type=int, default=1,
        help="Output dim expected by dataloader",
    )
    parser.add_argument(
        "--device", type=str, default="cpu",
        help="Device for inference: cpu or cuda",
    )
    parser.add_argument(
        "--num_warmup", type=int, default=10,
        help="Number of warmup runs",
    )
    parser.add_argument(
        "--num_iters", type=int, default=50,
        help="Number of timed runs",
    )
    parser.add_argument(
        "--measure_mae", action="store_true",
        help="Compute MAE vs label (model space)",
    )
    # To avoid node-count mismatch issues, allow forcing non-memmap npz
    # inputs and capping the number of evaluation samples.
    parser.add_argument(
        "--memmap_eval", action="store_true",
        help="Use memmap inputs for evaluation if available",
    )
    parser.add_argument(
        "--limit_eval_samples", type=int, default=None,
        help="Limit eval samples per split to this number",
    )
    return parser.parse_args()


def load_input(args):
    """Draw one batch (X, Y) from the dataset for benchmarking.

    The feature tensor is moved to the requested device; the label tensor
    stays where the loader produced it. Prefers the test split and falls
    back to the validation split.

    NOTE(review): when --device=cuda the target comes from check_device(),
    whose fallback behavior isn't visible here — presumably it resolves to
    an available device; confirm against src.utils.helper.
    """
    loaders = get_dataloader(
        args.datapath,
        args.batch_size,
        args.output_dim,
        num_workers=0,
        pin_memory=False,
        prefetch_factor=None,
        persistent_workers=False,
        memmap_eval=getattr(args, "memmap_eval", False),
        limit_eval_samples=getattr(args, "limit_eval_samples", None),
    )
    # Prefer test_loader (usually smaller, faster to benchmark); otherwise
    # fall back to val_loader.
    batch_source = loaders.get("test_loader") or loaders.get("val_loader")
    features, labels = next(iter(batch_source))
    if args.device == "cuda":
        target = check_device()
    else:
        target = torch.device("cpu")
    return features.to(target), labels


def benchmark_ts(path, X, device, num_warmup, num_iters, measure_mae=False, Y=None):
    """Benchmark a TorchScript artifact and optionally compute MAE.

    Args:
        path: Path to the serialized TorchScript module.
        X: Input batch tensor, already on `device`.
        device: torch.device to run inference on.
        num_warmup: Number of untimed warmup iterations.
        num_iters: Number of timed iterations.
        measure_mae: If True, also compute MAE of the last output vs `Y`.
        Y: Optional label tensor (model space) for the MAE computation.

    Returns:
        (avg_latency_ms, mae): `avg_latency_ms` is NaN if num_iters <= 0;
        `mae` is None unless requested and computable.
    """
    m = torch.jit.load(path, map_location=device)
    m.eval()
    # Fix: initialize `out` so that num_iters <= 0 no longer leaves it
    # unbound (previously a silent NameError inside the MAE try/except).
    out = None
    with torch.no_grad():
        # Warmup runs let JIT optimization and CUDA kernels stabilize.
        for _ in range(num_warmup):
            _ = m(X)
            if device.type == "cuda":
                torch.cuda.synchronize()
        # Timed runs: synchronize around each run so GPU async launches
        # don't make the measured wall time meaningless.
        times = []
        for _ in range(num_iters):
            if device.type == "cuda":
                torch.cuda.synchronize()
            t0 = time.perf_counter()
            out = m(X)
            if device.type == "cuda":
                torch.cuda.synchronize()
            times.append(time.perf_counter() - t0)
    # Explicit NaN for the degenerate case instead of np.mean([]) warning.
    lat_ms = float(np.mean(times) * 1000.0) if times else float("nan")
    mae = None
    if measure_mae and Y is not None and out is not None:
        try:
            mae = float(torch.mean(torch.abs(out.cpu() - Y)).item())
        except Exception:
            # Best-effort metric: shape/dtype mismatch just disables MAE
            # rather than aborting the benchmark.
            mae = None
    return lat_ms, mae


def benchmark_onnx(path, X, num_warmup, num_iters, measure_mae=False, Y=None, device_str="cpu"):
    """Benchmark an ONNX artifact via onnxruntime and optionally compute MAE.

    Args:
        path: Path to the .onnx file.
        X: Input batch tensor (converted to a float32 numpy array).
        num_warmup: Number of untimed warmup iterations.
        num_iters: Number of timed iterations.
        measure_mae: If True, compute MAE of the last output vs `Y`.
        Y: Optional label tensor (model space) for the MAE computation.
        device_str: "cpu" or "cuda..." — selects the execution provider.

    Returns:
        (avg_latency_ms, mae), or (None, None) if onnxruntime is missing.
    """
    try:
        import onnxruntime as ort
    except Exception:
        print("onnxruntime not installed; install via 'pip install onnxruntime' for CPU or 'onnxruntime-gpu' for CUDA")
        return None, None
    providers = ["CPUExecutionProvider"]
    if device_str.startswith("cuda"):
        # prefer GPU provider if available
        providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    sess = ort.InferenceSession(path, providers=providers)
    # Fix: read the graph's declared input name instead of assuming "X",
    # so artifacts exported with a different input name still work.
    input_name = sess.get_inputs()[0].name
    x_np = X.detach().cpu().numpy().astype(np.float32)
    feed = {input_name: x_np}
    # warmup
    for _ in range(num_warmup):
        _ = sess.run(None, feed)
    # timed runs; only the last output is kept for the MAE check
    times = []
    out = None
    for _ in range(num_iters):
        t0 = time.perf_counter()
        out = sess.run(None, feed)
        t1 = time.perf_counter()
        times.append(t1 - t0)
    lat_ms = float(np.mean(times) * 1000.0) if times else float("nan")
    out_np = out[0] if isinstance(out, list) and len(out) > 0 else None
    mae = None
    if measure_mae and Y is not None and out_np is not None:
        try:
            y_np = Y.detach().cpu().numpy().astype(np.float32)
            mae = float(np.mean(np.abs(out_np - y_np)))
        except Exception:
            # Best-effort metric: incompatible shapes just disable MAE.
            mae = None
    return lat_ms, mae


def main():
    """CLI entry point: load a batch, run the benchmark, print results."""
    args = parse_args()
    use_cuda = args.device == "cuda" and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    X, Y = load_input(args)
    print(
        f"Artifact: {args.artifact_path}\nType: {args.artifact_type}\n"
        f"Device: {device.type}\nBatch shape: {tuple(X.shape)}"
    )

    if args.artifact_type == "ts":
        lat_ms, mae = benchmark_ts(
            args.artifact_path, X, device,
            args.num_warmup, args.num_iters, args.measure_mae, Y,
        )
    else:
        lat_ms, mae = benchmark_onnx(
            args.artifact_path, X,
            args.num_warmup, args.num_iters, args.measure_mae, Y, args.device,
        )

    # None latency signals a failed run (e.g. onnxruntime missing).
    if lat_ms is None:
        print("Benchmark failed or provider not available.")
        return
    print(f"Avg latency: {lat_ms:.3f} ms (over {args.num_iters} runs)")
    if args.measure_mae and mae is not None:
        print(f"MAE (model space): {mae:.5f}")


# Script entry point: run the benchmark CLI only when executed directly.
if __name__ == "__main__":
    main()