#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse, json, os, signal, subprocess, sys, time
from datetime import datetime
import csv
from pathlib import Path
import urllib.request
from summary_md import render_markdown_table
import matplotlib

# Select the non-interactive Agg backend before any pyplot import so that
# chart rendering works in headless environments (no $DISPLAY required).
matplotlib.use("Agg")


def run_bench(api_url, api_kind, model_id, data_path, total, concurrency, out_path, metrics_interval=0.5, ma=3):
    charts_dir = os.path.join(out_path, "charts")
    os.makedirs(charts_dir, exist_ok=True)
    cmd = [
        sys.executable, "bench_eval_image_qa.py",
        "--api-url", api_url,
        "--api-kind", api_kind,
        "--model-id", model_id,
        "--data", data_path,
        "--total", str(total),
        "--concurrency", str(concurrency),
        "--out", os.path.join(out_path, f"c{concurrency}.detail.jsonl"),
        "--metrics-interval", str(metrics_interval),
        "--metrics-csv", os.path.join(charts_dir, f"c{concurrency}.csv"),
        "--plot-outdir", charts_dir,  # 新增：直接让 bench 帮忙画图
        "--plot-prefix", f"c{concurrency}_",
        "--ma", str(ma)
    ]
    out = subprocess.check_output(cmd, text=True)
    start = out.rfind("{")
    end = out.rfind("}")
    report = json.loads(out[start:end + 1])
    return report


def main():
    """CLI entry point: sweep benchmark concurrencies and summarize results.

    After an optional warmup pass at concurrency 1, runs the bench once per
    requested concurrency level and aggregates the per-run reports into
    <outdir>/summary.csv and <outdir>/summary.md.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--data", required=True)
    ap.add_argument("--api-kind", choices=["legacy", "openai"], default="legacy")
    ap.add_argument("--api-url", default="http://127.0.0.1:8000")
    ap.add_argument("--model-id", default="Qwen/Qwen2.5-VL-7B-Instruct")
    ap.add_argument("--total", type=int, default=500)
    ap.add_argument("--warmup", type=int, default=20)
    ap.add_argument("--concurrencies", default="1,10")
    ap.add_argument("--outdir", default="sweep_reports")
    # NOTE(review): --inflight is accepted but never used below; kept so
    # existing invocations do not break. Confirm whether it should be
    # forwarded to run_bench or removed.
    ap.add_argument("--inflight", type=int, default=1)
    ap.add_argument("--metrics-interval", type=float, default=0.5)
    ap.add_argument("--ma", type=int, default=1, help="折线图移动平均窗口（>=2 会平滑）")
    args = ap.parse_args()

    # Tolerate stray whitespace and empty items, e.g. "--concurrencies 1, 10,".
    concs = [int(tok) for tok in args.concurrencies.split(",") if tok.strip()]
    os.makedirs(args.outdir, exist_ok=True)
    print("\n=== Start to Predict  ===")

    # Warmup at concurrency 1 so startup/caching costs do not skew the sweep.
    if args.warmup > 0:
        run_bench(args.api_url, args.api_kind, args.model_id, args.data,
                  total=args.warmup, concurrency=1, out_path=args.outdir,
                  metrics_interval=args.metrics_interval, ma=args.ma)

    all_rows = []
    for c in concs:
        rpt = run_bench(args.api_url, args.api_kind, args.model_id, args.data,
                        total=args.total, concurrency=c, out_path=args.outdir,
                        metrics_interval=args.metrics_interval, ma=args.ma)
        all_rows.append({"concurrency": c, **rpt})

    # summary.csv -- one row per concurrency level, fixed column order.
    csv_path = os.path.join(args.outdir, "summary.csv")
    keys = ["concurrency", "accuracy", "e2el_avg", "e2el_p50", "e2el_p90", "e2el_p99", "ttft_avg",
            "responses_per_sec", "tokens_per_sec_global", "input_tokens_avg", "success", "fail", "total_requests"]
    with open(csv_path, "w", encoding="utf-8", newline="") as f:
        w = csv.DictWriter(f, fieldnames=keys)
        w.writeheader()
        for r in all_rows:
            # r.get(k) leaves missing metrics blank rather than raising.
            w.writerow({k: r.get(k) for k in keys})
    print(f"Summary CSV -> {csv_path}")

    # summary.md rendered from the CSV just written.
    md_path = os.path.join(args.outdir, "summary.md")
    Path(md_path).write_text(render_markdown_table(csv_path), encoding="utf-8")
    print(f"Summary MD  -> {md_path}")


if __name__ == "__main__":
    main()
