#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse, json, time, statistics
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
import os, base64
from tqdm import tqdm
from utils_monitor import ResourceMonitor, save_charts_from_metrics_csv


def extract_letter(s):
    """Return the first answer letter (A-J, uppercased) found in *s*, or None.

    Accepts None, a plain value (coerced via str), or a non-empty list,
    in which case only the first element is inspected.
    """
    if s is None:
        return None
    if isinstance(s, list) and s:
        s = s[0]
    # Scan characters in order; first uppercase hit in the A..J range wins.
    return next((c.upper() for c in str(s) if "A" <= c.upper() <= "J"), None)


def percentile(data, perc):
    """Return the *perc*-th percentile of *data* using the nearest-rank method.

    Args:
        data: Sequence of comparable values; may be empty.
        perc: Percentile in [0, 100].

    Returns:
        The selected element from a sorted copy of *data*, or None if *data*
        is empty.

    The rank index is clamped to the last element so perc=100 returns the
    maximum; the original computed `int(len(data) * perc / 100)` directly,
    which indexes one past the end for perc >= 100 (IndexError).
    """
    if not data:
        return None
    ordered = sorted(data)
    idx = min(int(len(ordered) * perc / 100), len(ordered) - 1)
    return ordered[idx]


def call_api(api_url, image, prompt, reference_letter=None):
    """Send one request to the legacy image-QA endpoint and measure it.

    Args:
        api_url: Full endpoint URL.
        image: Image payload, forwarded as-is in the JSON body.
        prompt: Question text.
        reference_letter: Expected answer letter (A-J) or None; when given,
            the prediction is graded against it.

    Returns:
        On success: dict with ok=True plus timing, token, and accuracy fields.
        On any failure: dict with ok=False, the error string, and timing.
    """
    t0 = time.time()
    try:
        resp = requests.post(api_url, json={"image": image, "question": prompt}, timeout=120)
        t1 = time.time()
        e2el = t1 - t0
        resp.raise_for_status()
        data = resp.json()
        # timing.ttft is treated as an absolute server-side timestamp
        # (assumed -- confirm against the server implementation) and converted
        # to a duration. The original expression
        # `data.get("timing", {}).get("ttft") - t0 or 0.0` raised TypeError
        # whenever ttft was missing (None - float) instead of defaulting to 0.0.
        server_ttft = data.get("timing", {}).get("ttft")
        ttft = (server_ttft - t0) if server_ttft is not None else 0.0
        usage = data.get("usage", {})
        pred_raw = data.get("answer", "").strip()
        pred_letter = extract_letter(pred_raw)
        is_correct = (pred_letter == reference_letter) if reference_letter else None
        out_tok = usage.get("output_tokens") or 0
        # Decode phase = total latency minus time-to-first-token, floored at 0.
        decode_time = max(e2el - float(ttft), 0.0)

        return {
            "ok": True,
            "ttft": float(ttft),
            "e2el": e2el,
            "decode_time": decode_time,  # decode duration for this request
            "input_tokens": usage.get("input_tokens"),
            "output_tokens": out_tok,
            "pred_letter": pred_letter,
            "reference_letter": reference_letter,
            "pred_raw": pred_raw,
            "is_correct": is_correct,
            "t_start": t0,  # request start time
            "t_end": t1  # request end time
        }
    except Exception as e:
        # Any failure (network, HTTP status, JSON decode) is reported as a
        # non-fatal result so the benchmark loop keeps running.
        return {"ok": False, "error": str(e), "e2el": time.time() - t0, "t_start": t0, "t_end": time.time()}


def _to_data_url(image_path_or_b64: str) -> str:
    # 已是 data:image/...;base64,*** 直接返回
    if isinstance(image_path_or_b64, str) and image_path_or_b64.startswith("data:image"):
        return image_path_or_b64
    # 否则按本地路径读取并转 base64
    with open(image_path_or_b64, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    # 简单按扩展名推测 mime
    ext = os.path.splitext(image_path_or_b64)[1].lower()
    mime = "jpeg" if ext in [".jpg", ".jpeg"] else "png"
    return f"data:image/{mime};base64,{b64}"


def call_api_openai(api_url, model_id, image, prompt, reference_letter=None):
    """Send one request to an OpenAI-compatible /v1/chat/completions endpoint.

    Args:
        api_url: Full chat-completions URL.
        model_id: Model name placed in the request payload.
        image: Local image path or data URL; empty/None sends text only.
        prompt: Question text.
        reference_letter: Expected answer letter (A-J) or None.

    Returns:
        A dict mirroring call_api's result schema on success; on any failure,
        a dict with ok=False, the error string, and timing fields.
    """
    t0 = time.time()
    try:
        data_url = _to_data_url(image) if image else None
        content = [{"type": "text", "text": prompt}]
        if data_url:
            # Per the OpenAI chat-completions spec, an image content part is
            # {"type": "image_url", "image_url": {"url": ...}}. The original
            # passed the bare data-URL string as image_url, which
            # schema-validating servers (including vLLM's OpenAI-compatible
            # frontend) reject with a 400.
            content.append({"type": "image_url", "image_url": {"url": data_url}})
        payload = {
            "model": model_id,
            "messages": [{"role": "user", "content": content}],
            "temperature": 0.0,
            "max_tokens": 128
        }
        resp = requests.post(api_url, json=payload, timeout=120)
        t1 = time.time()
        e2el = t1 - t0
        resp.raise_for_status()
        j = resp.json()

        text = j["choices"][0]["message"]["content"].strip()
        usage = j.get("usage", {}) or {}
        # OpenAI usage fields: prompt_tokens / completion_tokens / total_tokens
        input_tok = usage.get("prompt_tokens", 0)
        out_tok = usage.get("completion_tokens", 0)

        # This path cannot observe server-side TTFT precisely -- report None so
        # aggregation skips it; decode_time then degrades to the full e2el.
        ttft = None

        pred_letter = extract_letter(text)
        is_correct = (pred_letter == reference_letter) if reference_letter else None

        decode_time = max(e2el - (ttft or 0.0), 0.0)
        return {
            "ok": True,
            "ttft": ttft,
            "e2el": e2el,
            "decode_time": decode_time,
            "input_tokens": input_tok,
            "output_tokens": out_tok,
            "pred_letter": pred_letter,
            "reference_letter": reference_letter,  # added for parity with call_api
            "pred_raw": text,
            "is_correct": is_correct,
            "t_start": t0,
            "t_end": t1
        }
    except Exception as e:
        # Report failures as data so the benchmark loop keeps running.
        return {"ok": False, "error": str(e), "e2el": time.time() - t0, "t_start": t0, "t_end": time.time()}


def run_bench_eval(api_url, jsonl_path, total_requests=100, concurrency=5):
    """Run the benchmark: load JSONL samples, fan requests out over a thread
    pool, and aggregate latency, throughput, and accuracy metrics.

    Dispatches to call_api_openai or call_api depending on the module-level
    ARGS_API_KIND global (set by the __main__ block); the "openai" path also
    reads ARGS_API_URL and ARGS_MODEL_ID from module scope.

    Returns:
        (report, results): a summary dict and the raw per-request dicts.
    """
    with open(jsonl_path, "r", encoding="utf-8") as fh:
        samples = [json.loads(row) for row in fh]
    # Never issue more requests than the dataset provides.
    samples = samples[:min(total_requests, len(samples))]

    results = []
    with ThreadPoolExecutor(max_workers=concurrency) as pool:
        pending = []
        for sample in tqdm(samples, desc="Submitting requests"):
            image = sample["input"]["images"][0] if sample["input"]["images"] else ""
            prompt = sample["input"].get("prompt") or sample["input"].get("question") or sample.get("question", "")
            reference = extract_letter(sample.get("reference"))
            if ARGS_API_KIND == "openai":
                fut = pool.submit(call_api_openai, ARGS_API_URL, ARGS_MODEL_ID, image, prompt, reference)
            else:
                fut = pool.submit(call_api, api_url, image, prompt, reference)
            pending.append(fut)
        for finished in as_completed(pending):
            results.append(finished.result())

    succeeded = [r for r in results if r["ok"]]
    e2el_vals = [r["e2el"] for r in succeeded]
    ttft_vals = [r["ttft"] for r in succeeded if r["ttft"] is not None]
    input_tok_vals = [r["input_tokens"] for r in succeeded if r.get("input_tokens") is not None]
    correct_flags = [r["is_correct"] for r in succeeded if r["is_correct"] is not None]

    # Global throughput: all output tokens over all decode time.
    decode_total = sum((r.get("decode_time") or 0.0) for r in succeeded)
    output_tok_total = sum((r.get("output_tokens") or 0) for r in succeeded)
    tokens_per_sec_global = output_tok_total / decode_total if decode_total > 0 else None

    # Wall-clock window spanned by the successful requests (basis for RPS).
    wall_time = (max(r["t_end"] for r in succeeded) - min(r["t_start"] for r in succeeded)) if succeeded else None
    responses_per_sec = len(succeeded) / wall_time if wall_time and wall_time > 0 else None

    report = {
        "total_requests": len(samples),
        "concurrency": concurrency,
        "success": len(succeeded),
        "fail": len(results) - len(succeeded),
        "accuracy": statistics.mean(correct_flags) if correct_flags else None,
        "e2el_avg": statistics.mean(e2el_vals) if e2el_vals else None,
        "e2el_p50": percentile(e2el_vals, 50),
        "e2el_p90": percentile(e2el_vals, 90),
        "e2el_p99": percentile(e2el_vals, 99),
        "ttft_avg": statistics.mean(ttft_vals) if ttft_vals else None,
        "input_tokens_avg": statistics.mean(input_tok_vals) if input_tok_vals else None,
        "tokens_per_sec_global": tokens_per_sec_global,
        "responses_per_sec": responses_per_sec,
    }
    return report, results


if __name__ == "__main__":
    # CLI entry point: parse arguments, optionally start resource monitoring,
    # run the benchmark, dump per-request results, render charts, and print
    # the summary report.
    ap = argparse.ArgumentParser()
    ap.add_argument("--api-url", required=True)
    ap.add_argument("--api-kind", choices=["legacy", "openai"], default="legacy",
                    help="legacy=/image-qa 自研接口；openai=vLLM /v1/chat/completions")
    ap.add_argument("--model-id", default="Qwen/Qwen2.5-VL-7B-Instruct",
                    help="vLLM 下发送到 OpenAI 接口的 model 名称")
    ap.add_argument("--data", required=True)
    ap.add_argument("--total", type=int, default=500)
    ap.add_argument("--concurrency", type=int, default=5)
    ap.add_argument("--out", default="bench_eval_results.jsonl")
    ap.add_argument("--metrics-csv", default="", help="resource CSV path")
    ap.add_argument("--metrics-interval", type=float, default=0.5, help="sampling interval seconds")
    ap.add_argument("--plot-outdir", default="results/", help="保存折线图的输出目录（留空则不生成图）")
    ap.add_argument("--plot-prefix", default="", help="折线图文件名前缀，例如 Model_c10_")
    ap.add_argument("--ma", type=int, default=1, help="折线图移动平均窗口大小（>=2会平滑）")

    args = ap.parse_args()
    # Module-level globals consumed by run_bench_eval's worker dispatch.
    ARGS_API_KIND = args.api_kind
    ARGS_API_URL = args.api_url
    ARGS_MODEL_ID = args.model_id

    # Normalize the OpenAI-style URL to the chat-completions path.
    # NOTE(review): a URL already ending in "/v1" still gets the full
    # "/v1/chat/completions" suffix appended, producing
    # ".../v1/v1/chat/completions" -- confirm callers pass the bare host.
    if ARGS_API_KIND == "openai" and not ARGS_API_URL.endswith("/v1/chat/completions"):
        ARGS_API_URL = ARGS_API_URL.rstrip("/") + "/v1/chat/completions"
    mon = None
    try:
        if args.metrics_csv:
            mon = ResourceMonitor(args.metrics_csv, args.concurrency, True, interval=args.metrics_interval)
            mon.start()
        # NOTE(review): the un-normalized args.api_url is passed here; the
        # "openai" branch inside run_bench_eval reads the normalized
        # ARGS_API_URL global instead, so this is only used by the legacy path.
        report, results = run_bench_eval(args.api_url, args.data, args.total, args.concurrency)
    finally:
        # Stop the resource monitor even if the benchmark raises.
        if mon:
            mon.stop()

    # Persist one JSON object per request (successes and failures alike).
    with open(args.out, "w", encoding="utf-8") as f:
        for r in results:
            f.write(json.dumps(r, ensure_ascii=False) + "\n")

    # If a metrics CSV was recorded and an output directory was given,
    # automatically render line charts from it.
    if args.metrics_csv and args.plot_outdir:
        try:
            save_charts_from_metrics_csv(
                csv_path=args.metrics_csv,
                outdir=args.plot_outdir,
                prefix=args.plot_prefix,
                ma=args.ma,
                title_suffix=f" (concurrency={args.concurrency})"
            )
        except Exception as e:
            # Chart generation is best-effort; never fail the run over it.
            print(f"[WARN] Failed to save charts from CSV: {e}")

    print("\n=== Bench+Eval Report ===")
    print(json.dumps(report, ensure_ascii=False, indent=2))