# -*- coding: utf-8 -*-
import zlib

import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from sqlalchemy import create_engine, text

# === 1. Database connection ===
# NOTE(review): credentials are hard-coded; consider environment variables
# before deploying outside a dev box.
DB_USER = "root"
DB_PWD  = "123456"
DB_HOST = "192.168.140.128"
DB_PORT = 3306
DB_NAME = "api"

_db_url = (
    "mysql+mysqlconnector://"
    f"{DB_USER}:{DB_PWD}@{DB_HOST}:{DB_PORT}/{DB_NAME}?charset=utf8mb4"
)
# pool_pre_ping validates pooled connections before use (guards against
# MySQL dropping idle connections).
engine = create_engine(_db_url, pool_pre_ping=True)

# === 2. Load the last 30 days of gateway access logs ===
_SQL_RECENT_LOGS = """
SELECT id, user_id, interface_info_id, ip, ua, method, ak,
       header_cnt, param_cnt, query_size, body_size, success, latency_ms, created_at
FROM gateway_access_log
WHERE created_at >= NOW() - INTERVAL 30 DAY
"""

df = pd.read_sql(text(_SQL_RECENT_LOGS), engine)

# Abort early: an empty table means there is nothing to train on.
if df.empty:
    raise RuntimeError("⚠️ gateway_access_log 没有数据，请先导入模拟数据！")

# === 3. Feature engineering (order must match the Java side exactly) ===
# One-hot flags for the HTTP method (any other method yields 0 for both).
df["method_get"] = (df["method"].str.upper()=="GET").astype(int)
df["method_post"] = (df["method"].str.upper()=="POST").astype(int)
# Numeric columns: missing values are treated as zero.
df["header_cnt"] = df["header_cnt"].fillna(0)
df["ua_len"] = df["ua"].fillna("").str.len()
df["query_size"] = df["query_size"].fillna(0)
df["body_size"] = df["body_size"].fillna(0)
df["param_cnt"] = df["param_cnt"].fillna(0)
# Presence flags for access key and authenticated user.
df["is_ak_present"] = df["ak"].notna().astype(int)
df["has_user"] = df["user_id"].notna().astype(int)
# Coarse bucketing of IDs into small modulo classes.
df["user_mod_10"] = df["user_id"].fillna(0).astype(int) % 10
df["path_hash_mod_100"] = df["interface_info_id"].fillna(0).astype(int) % 100
# BUG FIX: the original used Python's built-in hash(), which is salted per
# process (PYTHONHASHSEED) for strings — the bucket features were therefore
# not reproducible between training runs and could never match the Java
# serving side. zlib.crc32 is deterministic and has a direct Java
# counterpart (java.util.zip.CRC32); ensure the Java ORDER uses it too.
df["ip_hash_mod_1000"] = df["ip"].fillna("").apply(lambda s: zlib.crc32(s.encode("utf-8")) % 1000)
df["ua_hash_mod_1000"] = df["ua"].fillna("").apply(lambda s: zlib.crc32(s.encode("utf-8")) % 1000)

# === 4. Time-window features (per-minute aggregation) ===
# Per-minute request count (QPS) for each user+path pair.
# FIX: "min" replaces the frequency alias "T", deprecated since pandas 2.2.
df["minute"] = df["created_at"].dt.floor("min")
qps = (
    df.groupby(["user_id", "interface_info_id", "minute"])
      .size()
      .reset_index(name="qps_user_path")
)

# QPS delta vs. the previous minute of the same user+path series
# (first observation of a series has no predecessor -> 0).
qps["delta_user_path"] = (
    qps.groupby(["user_id", "interface_info_id"])["qps_user_path"]
       .diff()
       .fillna(0)
)

# IP entropy: entropy of a user's IP distribution within a time window.
def entropy(vals):
    """Return the Shannon entropy (in bits) of the values in *vals*.

    Parameters
    ----------
    vals : pd.Series
        Categorical observations (here: IP addresses of one user in one
        minute window).

    Returns
    -------
    float
        0.0 for an empty or single-valued series; log2(k) for k equally
        likely distinct values.
    """
    probs = vals.value_counts(normalize=True)
    # value_counts never yields zero probabilities, so log2 is safe without
    # an epsilon. The previous `log2(p + 1e-9)` hack skewed every result and
    # produced a small *negative* entropy for a single-valued series.
    return float(-(probs * np.log2(probs)).sum())

# Per-(user, minute) entropy of the IP distribution.
ip_entropy_per_window = (
    df.groupby(["user_id", "minute"])["ip"]
      .apply(entropy)
      .reset_index(name="entropy_ip")
)

# Join the window-level features back onto the per-request rows.
df = df.merge(qps, on=["user_id", "interface_info_id", "minute"], how="left")
df = df.merge(ip_entropy_per_window, on=["user_id", "minute"], how="left")

# Rows that did not match a window (e.g. NULL user_id) get zeros.
for _col in ("qps_user_path", "delta_user_path", "entropy_ip"):
    df[_col] = df[_col].fillna(0)

# === 5. Frozen feature order ===
# This exact ordering is the model's input contract — the serving side must
# build its feature vector in the same order.
FEATURES = [
    "method_get", "method_post", "header_cnt", "ua_len", "query_size",
    "body_size", "param_cnt", "is_ak_present", "has_user", "user_mod_10",
    "path_hash_mod_100", "qps_user_path", "delta_user_path", "entropy_ip",
    "ip_hash_mod_1000", "ua_hash_mod_1000",
]

# Training matrix: rows = requests, columns = FEATURES (as float64).
X = df.loc[:, FEATURES].to_numpy(dtype=float)

# === 6. Train the IsolationForest anomaly detector ===
_detector = IsolationForest(
    n_estimators=150,
    contamination=0.05,  # assume roughly 5% of traffic is anomalous
    random_state=42,     # reproducible fits
)
# Standardize features first so no single column dominates the splits.
pipe = Pipeline(steps=[
    ("scaler", StandardScaler()),
    ("iforest", _detector),
])
pipe.fit(X)

# === 7. Export the fitted pipeline to ONNX ===
# Single float input named "input"; batch dimension is dynamic, width is
# pinned to the frozen feature count.
onnx_model = convert_sklearn(
    pipe,
    initial_types=[("input", FloatTensorType([None, len(FEATURES)]))],
    target_opset={"": 13, "ai.onnx.ml": 3},
)

with open("iforest.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())

print("✅ 模型导出成功: iforest.onnx")
print("特征顺序:", FEATURES)
