# -*- coding: utf-8 -*-
import argparse
import json
import sys
from typing import List

import numpy as np
import onnx
import onnxruntime as ort


# Feature names, in the exact order the Java gateway feeds them to the model.
# Must match the FEATURES list used by the Python training script.
DEFAULT_ORDER = [
    "method_get", "method_post", "header_cnt", "ua_len",
    "query_size", "body_size", "param_cnt", "is_ak_present",
    "has_user", "user_mod_10", "path_hash_mod_100",
    "qps_user_path", "delta_user_path", "entropy_ip",
    "ip_hash_mod_1000", "ua_hash_mod_1000",
]


def parse_args():
    """Build and evaluate the command-line interface for this checker."""
    parser = argparse.ArgumentParser(description="Verify IsolationForest ONNX for API Gateway")
    parser.add_argument("--model", required=True, help="Path to iforest.onnx")
    parser.add_argument("--features", default=",".join(DEFAULT_ORDER),
                        help="Comma-separated feature ORDER expected by Java")
    parser.add_argument("--dump", default="", help="Optional path to write a JSON report")
    return parser.parse_args()


def printable_graph_summary(m: "onnx.ModelProto") -> dict:
    """Collect a compact, JSON-serializable summary of an ONNX model.

    Deliberately avoids dumping the whole (potentially huge) graph: only
    metadata, opset versions, input/output tensor signatures and the node
    count are returned.

    :param m: loaded ONNX model protobuf (annotation is a string so the
        module stays importable even where ``onnx`` is resolved lazily)
    :return: plain dict safe to pass to ``json.dump``
    """
    opset_imports = {imp.domain: imp.version for imp in m.opset_import}
    return {
        "ir_version": m.ir_version,
        "producer_name": m.producer_name,
        "producer_version": m.producer_version,
        "domain": m.domain,
        "model_version": m.model_version,
        "opset_imports": opset_imports,
        # inputs/outputs share the same summary shape — one helper, no duplication
        "inputs": [_tensor_summary(vi) for vi in m.graph.input],
        "outputs": [_tensor_summary(vi) for vi in m.graph.output],
        "node_count": len(m.graph.node),
    }


def _tensor_summary(value_info) -> dict:
    """Name / element type / shape of one graph input or output.

    Dynamic dimensions (``dim_value == 0`` in the protobuf) are reported
    as ``None``; a missing ``tensor_type`` yields an empty shape list,
    matching the previous ``TensorShapeProto()`` fallback behavior.
    """
    tensor_type = getattr(value_info.type, "tensor_type", None)
    shape = []
    if tensor_type is not None:
        shape = [d.dim_value if d.dim_value != 0 else None
                 for d in tensor_type.shape.dim]
    return {
        "name": value_info.name,
        "elem_type": getattr(tensor_type, "elem_type", None),
        "shape": shape,
    }


def main():
    """CLI entry point: validate the ONNX model and cross-check it against the Java ORDER.

    Exits with code 2 when the model graph has no inputs.
    """
    args = parse_args()
    expected_order: List[str] = [x.strip() for x in args.features.split(",") if x.strip()]

    # 1) Load & basic structural check (checker raises on a malformed model)
    model = onnx.load(args.model)
    onnx.checker.check_model(model)

    meta = printable_graph_summary(model)
    _print_meta(meta)

    # 2) Shape check (first input assumed to be the feature vector)
    feature_dim = _report_input(meta)

    # 3) Compare the model dimension with the expected ORDER length
    exp_len = len(expected_order)
    _compare_dims(feature_dim, exp_len)

    # 4) Quick inference (zeros) to get runtime output names & shapes
    input_name, output_names = _dry_run(args.model, feature_dim, exp_len)

    # 5) Advice for Java
    print("\n======== JAVA 配置建议 ========")
    print("把下面的 ORDER 放到 RiskConfig 中（需与训练脚本一致）：")
    print(expected_order)
    print("若 OnnxIsolationScorer 需要显式输出名（不是第一个输出），使用 explicitOutputName = 上面输出列表中的目标名称。")

    # Optional machine-readable report (e.g. for CI diffing)
    if args.dump:
        report = {
            "meta": meta,
            "input_name": input_name,
            "output_names": output_names,
            "expected_order": expected_order,
            "feature_dim_from_model": feature_dim,
        }
        with open(args.dump, "w", encoding="utf-8") as f:
            json.dump(report, f, ensure_ascii=False, indent=2)
        print(f"\n已写出 JSON 报告：{args.dump}")


def _print_meta(meta: dict) -> None:
    # Pretty-print the metadata dict produced by printable_graph_summary().
    print("======== ONNX MODEL META ========")
    print(f"IR version      : {meta['ir_version']}")
    print(f"Producer        : {meta['producer_name']} {meta['producer_version']}")
    print(f"Model domain    : {meta['domain']}")
    print(f"Model version   : {meta['model_version']}")
    print(f"Opset imports   : {meta['opset_imports']}")
    print(f"Inputs          : {[i['name'] for i in meta['inputs']]}")
    print(f"Outputs         : {[o['name'] for o in meta['outputs']]}")
    print(f"Node count      : {meta['node_count']}")
    # "" is the default ONNX domain; ai.onnx.ml carries the tree-ensemble ops
    if "" in meta["opset_imports"]:
        print(f"  - onnx main opset: {meta['opset_imports']['']}")
    if "ai.onnx.ml" in meta["opset_imports"]:
        print(f"  - ai.onnx.ml opset: {meta['opset_imports']['ai.onnx.ml']}")


def _report_input(meta: dict):
    # Print the first input tensor and return its inferred feature dimension:
    # an int when the model pins it, None when the dim is dynamic or the shape
    # is not rank>=2. Exits with status 2 when the graph has no inputs at all.
    if not meta["inputs"]:
        print("❌ 模型没有输入？请检查导出。")
        sys.exit(2)
    inp0 = meta["inputs"][0]
    print("\n======== INPUT TENSOR ========")
    print(f"name  : {inp0['name']}")
    print(f"shape : {inp0['shape']}  (形如 [None, feature_dim])")
    feature_dim = None
    if inp0["shape"] and len(inp0["shape"]) >= 2:
        feature_dim = inp0["shape"][-1]
    print(f"推断的特征维度 feature_dim = {feature_dim}")
    return feature_dim


def _compare_dims(feature_dim, exp_len: int) -> None:
    # Tell the operator whether the model's fixed dimension matches the Java ORDER.
    if isinstance(feature_dim, int):
        if feature_dim != exp_len:
            print(f"\n❌ 特征维度不一致：ONNX 模型需要 {feature_dim} 维，但 Java ORDER 提供 {exp_len} 维。")
            print("   - 请保证 Python 训练脚本中的 FEATURES 与 Java ORDER 完全一致（名称与顺序）。")
        else:
            print(f"\n✅ 特征维度一致：{feature_dim} 维。")
    else:
        print("\n⚠️ 模型未固定特征维度（shape 中为 None），将以运行时张量形状为准。请确保 Java 传入的长度为：", exp_len)


def _dry_run(model_path: str, feature_dim, exp_len: int):
    # Run a single all-zeros vector through onnxruntime and report each output.
    # Returns (input_name, output_names) for the optional JSON report.
    print("\n======== DRY RUN (Zero Vector) ========")
    sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    output_names = [o.name for o in sess.get_outputs()]
    print(f"Session Inputs  : {[i.name for i in sess.get_inputs()]}")
    print(f"Session Outputs : {output_names}")

    # Fall back to the Java ORDER length when the model leaves the dim dynamic.
    # NOTE(review): assumes the model input is float32 (skl2onnx default) — confirm.
    feat_len = exp_len if feature_dim is None else feature_dim
    x = np.zeros((1, feat_len), dtype=np.float32)
    res = sess.run(None, {input_name: x})
    for i, out in enumerate(res):
        print(f"  - output[{i}] name='{output_names[i]}' shape={list(out.shape)} dtype={out.dtype}")
    return input_name, output_names


# Script entry point: run the checker only when executed directly, not on import.
if __name__ == "__main__":
    main()
