#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations

import argparse
import base64
import csv
import json
import mimetypes
import os
import re
import time
from pathlib import Path
from typing import Any, Dict, List, Optional

from openai import OpenAI

# ========== Configuration ==========
# NOTE(review): a live-looking API key was hard-coded here. Prefer the
# DASHSCOPE_API_KEY environment variable so the secret does not live in
# version control; the literal is kept only as a backward-compatible
# fallback — rotate the key and remove it.
API_KEY = os.getenv("DASHSCOPE_API_KEY", "sk-1ae2d1e8b5bf471cbf916d05e9784717")
BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
MODEL = "qvq-plus-latest"
TEMPERATURE = 0.0   # deterministic decoding for reproducible selections
MAX_TOKENS = 900    # cap on the generated answer
RETRIES = 3         # attempts per call before giving up
RETRY_DELAY = 2.0   # seconds between retry attempts
# ==========================

# ========== Global client ==========
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

# ========== 工具函数 ==========
def _norm_path(p: str) -> str:
    """路径归一化：统一斜杠、去除 ./ 前缀"""
    p = p.strip()
    p = p.replace("\\", "/")
    if p.startswith("./"):
        p = p[2:]
    return p

def _image_payload(path_or_url: str) -> Dict[str, Any]:
    """
    支持 http(s) 或本地文件。本地转为 dataURL。
    """
    s = path_or_url.strip()
    if s.lower().startswith(("http://", "https://", "data:")):
        return {"type": "image_url", "image_url": {"url": s}}
    if not os.path.exists(s):
        # 兼容 data/ 和 image/ 路径
        if s.startswith("data/") and os.path.exists(s[5:]):
            s = s[5:]
        elif s.startswith("image/") and os.path.exists("data/" + s):
            s = "data/" + s
        else:
            raise FileNotFoundError(f"找不到图片：{path_or_url}")
    mime, _ = mimetypes.guess_type(s)
    if not mime:
        ext = os.path.splitext(s)[1].lower()
        mime = {
            ".png": "image/png",
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".webp": "image/webp",
            ".gif": "image/gif"
        }.get(ext, "image/png")
    with open(s, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")
    return {"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}}

# ========== Data loading ==========
def load_input_json(json_path: str) -> List[Dict[str, Any]]:
    """Load the input JSON: a list of {image, topn: [{caption, text_index}]}."""
    with open(json_path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)
    if not isinstance(payload, list):
        raise ValueError("输入 JSON 必须是一个列表")
    return payload

def load_source_to_png(csv_path: str) -> Dict[str, str]:
    """Read the CSV and build a Source -> PNG path mapping.

    Both paths are normalized via _norm_path. Duplicate Source rows keep
    the last occurrence. Raises ValueError when the header is missing or
    lacks the required "Source"/"PNG" columns.
    """
    mapping: Dict[str, str] = {}
    with open(csv_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        # Fix: fieldnames is None for an empty file, which previously made
        # the `in` test below raise TypeError instead of the intended
        # ValueError about missing columns.
        fieldnames = reader.fieldnames or []
        for col in ("Source", "PNG"):
            if col not in fieldnames:
                raise ValueError(f"CSV 缺少必要列: {col}")
        for row in reader:
            mapping[_norm_path(row["Source"])] = _norm_path(row["PNG"])
    return mapping

# ========== Prompt construction ==========
# Instruction prompt sent alongside the image. It tells the model to pick
# exactly ONE candidate caption and to reply with a single fenced JSON object
# of the form {"caption", "text_index"} — extract_fenced_json() downstream
# relies on that contract. Do not edit casually: the wording is part of the
# model's runtime input.
PROMPT_EN = """You are a meticulous visual analyst specializing in academic figures and charts. You must first analyze the image content (including multi-panel layouts, axes, legends, titles, labels, units, error bars, confidence intervals, significance markers, table cells, callouts/annotations, panel tags like (a)/(b), and any overlaid text), then carefully compare it against a small set of candidate captions. Your goal is to choose the single best candidate that most accurately and specifically describes THIS figure, with strong emphasis on the visible text and quantitative details.

CRITICAL INSTRUCTIONS FOR ACADEMIC FIGURES:
1) Step-by-step analysis:
   - Identify figure type(s): line/bar/scatter chart, heatmap/confusion matrix, ablation table, ROC/PR curve, histograms, box plots, multi-panel composite, UI/workflow diagram, etc.
   - Read textual elements exactly: figure title/subtitle, axis titles, axis units/scales (linear/log), tick labels, legend entries, dataset/model names, metrics (e.g., accuracy, F1, BLEU, AUC), hyperparameters, statistical notes (e.g., n=, p-values, ± std/CI), and panel labels (a)(b)(c)….
   - Note key patterns: trends, rankings, cross-overs, best/worst models, significant gaps, error bars overlapping/non-overlapping, ablation effects, dataset/domain differences.
2) Faithfulness to the image:
   - If a candidate contradicts the figure text, units, metrics, model/dataset names, or panel scope, REJECT it.
   - Prefer captions that match the exact entities/metrics and the primary takeaway shown (e.g., "Model X outperforms Y on Dataset Z with higher F1" when the legend/axis supports it).
   - Do NOT invent claims beyond visible content (no unseen datasets, methods, or numbers).
3) Specificity over generic:
   - Favor captions naming the correct model(s), dataset(s), and metric(s) as visible.
   - If the figure is multi-panel, ensure the caption applies to the shown panel(s) and not an unrelated panel.
   - If text shows ablation/variant names, respect them (e.g., "w/o pretraining", "+FT", "baseline").
4) Tie-breaking (when multiple are plausible):
   (a) Highest fidelity to visible text and units,
   (b) Correctness of entities/metrics/datasets and any statistical qualifiers,
   (c) Clarity and specificity of the main finding,
   (d) Brevity if equally accurate.
5) You MUST select exactly ONE candidate from the provided list. DO NOT invent a new caption. Use the chosen caption verbatim.

OUTPUT FORMAT (MANDATORY):
Return ONLY a single JSON object in a fenced code block with keys:
{
  "caption": "<the chosen candidate string, exactly as given>",
  "text_index": <the chosen candidate's text_index integer>
}
No additional text outside the JSON code block. No markdown besides the single fenced code block. Do not change wording or punctuation of the chosen caption.

CANDIDATES:
<You will be given a JSON array of objects each with {caption, text_index}. Use them verbatim.>"""

def build_messages(
    image_payload: Dict[str, Any],
    candidates: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """Assemble the single-turn user message: image part first, then the
    instruction prompt followed by the candidate list serialized as JSON."""
    slimmed = [
        {"caption": cand.get("caption", ""), "text_index": cand.get("text_index")}
        for cand in candidates
    ]
    prompt_text = PROMPT_EN + "\n\nCANDIDATE LIST (JSON):\n" + json.dumps(slimmed, ensure_ascii=False)
    parts = [image_payload, {"type": "text", "text": prompt_text}]
    return [{"role": "user", "content": parts}]

# ========== JSON extraction ==========
# Matches one fenced ```json ... ``` block containing a single JSON object.
FENCED_JSON_RE = r"```(?:json)?\s*({.*?})\s*```"

def extract_fenced_json(text: str) -> Dict[str, Any]:
    """Extract and parse the JSON object from a fenced code block.

    Falls back to parsing the whole text as bare JSON when no fence is
    found. Raises ValueError when nothing parseable is present.
    """
    match = re.search(FENCED_JSON_RE, text, re.DOTALL | re.IGNORECASE)
    if not match:
        # No code fence: the model may have returned bare JSON.
        try:
            return json.loads(text.strip())
        except json.JSONDecodeError as e:
            # Fix: was a bare `except:` that also swallowed KeyboardInterrupt/
            # SystemExit; narrow to the actual parse error and chain it.
            raise ValueError("模型未返回有效的 JSON 或代码块") from e
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError as e:
        raise ValueError(f"JSON 解析失败: {e}") from e

# ========== Model invocation ==========
def call_qvq_with_retry(
    messages: List[Dict[str, Any]],
    temperature: float = TEMPERATURE,
    max_tokens: int = MAX_TOKENS,
    retries: int = RETRIES,
    retry_delay: float = RETRY_DELAY
) -> Dict[str, Any]:
    """Streamed chat-completion call with retries.

    Concatenates the streamed delta content into one string, then parses
    the model's fenced-JSON answer. Raises RuntimeError once all `retries`
    attempts have failed.
    """
    for attempt in range(1, retries + 1):
        try:
            completion = client.chat.completions.create(
                model=MODEL,
                messages=messages,
                temperature=temperature,
                stream=True,
                max_tokens=max_tokens
            )
            full_content = ""
            for chunk in completion:
                # Some chunks carry no choices/delta; skip them defensively.
                if chunk.choices and (delta := chunk.choices[0].delta):
                    full_content += delta.content or ""
            # Parse the accumulated answer into a dict.
            return extract_fenced_json(full_content)
        except Exception as e:
            print(f"[重试中...] 错误: {e}")
            # Fix: the retry_delay parameter was accepted but ignored — the
            # code slept a hard-coded 0.01s, defeating RETRY_DELAY = 2.0.
            # Honor it, and skip the sleep after the final failed attempt.
            if attempt < retries:
                time.sleep(retry_delay)
    raise RuntimeError("调用失败，重试耗尽")

# ========== Main pipeline ==========
def main():
    """CLI entry point.

    Reads the candidate file (--in) and the Source->PNG CSV (--input_csv),
    asks the model to select one caption per image, and writes all results
    (successes and per-item errors) to --out as JSON.
    """
    parser = argparse.ArgumentParser(description="学术图像 caption 选择（基于 QVQ）")
    parser.add_argument("--in", dest="in_json", required=True, help="输入 JSON 文件（含 image, topn）")
    parser.add_argument("--out", dest="out_json", required=True, help="输出 JSON 文件")
    parser.add_argument("--input_csv", required=True, help="CSV 文件，含 Source 和 PNG 路径")
    parser.add_argument("--max_items", type=int, default=None, help="限制处理条数（调试用）")
    args = parser.parse_args()

    # Load inputs.
    items = load_input_json(args.in_json)
    source_to_png = load_source_to_png(args.input_csv)

    results = []
    processed = 0

    for item in items:
        # Fix: compare against None so `--max_items 0` is honored as a real
        # limit instead of being dropped by truthiness.
        if args.max_items is not None and processed >= args.max_items:
            break

        source_raw = item.get("image", "").strip()
        if not source_raw:
            print("[跳过] 空 image 字段")
            continue
        source = _norm_path(source_raw)
        png_path = source_to_png.get(source)
        if not png_path:
            print(f"[跳过] 无匹配 PNG: {source}")
            continue

        candidates = item.get("topn", [])
        if not candidates:
            print(f"[跳过] 无候选 caption: {source}")
            continue

        try:
            img_msg = _image_payload(png_path)
            messages = build_messages(img_msg, candidates)
            response = call_qvq_with_retry(messages)

            # Validate the model's answer before trusting it.
            for k in ("caption", "text_index"):
                if k not in response:
                    raise ValueError(f"缺失字段: {k}")

            results.append({
                "image": source_raw,
                "png_path": png_path,
                "selected_caption": response["caption"],
                "text_index": response["text_index"]
            })
            print(f"[成功] {source_raw} -> text_index={response['text_index']}")
            processed += 1
            # Fix: removed a leftover bare `print(processed)` debug line.

        except Exception as e:
            # Record the failure but keep processing the remaining items.
            print(f"[失败] {source_raw}: {e}")
            results.append({
                "image": source_raw,
                "error": str(e)
            })

        # Brief pause between items — presumably API rate-limit pacing; confirm.
        time.sleep(0.02)

    # Persist everything, including error entries.
    with open(args.out_json, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    print(f"完成：{len(results)} 条结果已保存至 {args.out_json}")

if __name__ == "__main__":
    main()