"""
Retrieve Top-N captions for each visual input (PDF/PNG/JPG) using a fine-tuned OpenCLIP checkpoint.

- Loads your fine-tuned ckpt (saved by s2_clip_ft.py) and builds ViT-B/16 (or from ckpt args).
- Accepts visual sources from a CSV (column name: `Source`) via --sources_csv, or by scanning a folder (--data_root),
  or from a plain text list (--sources_txt). Priority: sources_csv > sources_txt > data_root.
- Renders PDF pages (first page by default, or all pages) into PIL images.
- Batches image & text embedding extraction on GPU (with AMP), computes cosine similarities, and
  writes a JSON mapping each image (or pdf::page) to Top-N candidate captions + scores.

Usage
-----
python clip_retrieve_topn.py \
  --ckpt user_data/model_data/ckpt_clip_ft.pt \
  --sources_csv ./image_sources.csv \
  --candidates_csv ./candidates.csv \
  --out ./retrieval_topn.json \
  --topn 10 \
  --pdf_pages first   # or all/skip

Dependencies
------------
pip install torch torchvision open-clip-torch pandas tqdm pillow pymupdf

Notes
-----
- Cosine similarity on L2-normalized features is used for ranking (consistent with eval in s2_clip_ft.py).
- ~1500 images × 1500 texts = 2.25M sims; computed in blocks on GPU and kept as fp16 on CPU to save RAM.
- PDF pages are labeled as "path/to/file.pdf::page=K" in the JSON.
"""

import os, json, math, argparse
from pathlib import Path
from typing import List, Tuple, Dict

import torch
import torch.nn.functional as F
from tqdm import tqdm
import pandas as pd
import open_clip

from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None

torch.backends.cudnn.benchmark = True

# ---- PDF rendering (PyMuPDF) ----
try:
    import fitz  # PyMuPDF
    HAVE_FITZ = True
except Exception:
    HAVE_FITZ = False


def load_model_from_ckpt(ckpt_path: str, device: str = "cuda"):
    """Build the OpenCLIP model named in a checkpoint and load its weights.

    Parameters
    ----------
    ckpt_path : checkpoint saved by s2_clip_ft.py; may carry an ``args`` dict
        (for the model name) and the weights at top level or under ``state_dict``.
    device : target device for the model (e.g. "cuda" or "cpu").

    Returns
    -------
    (model, preprocess_val, tokenizer) with the model moved to *device*
    and set to eval mode.
    """
    ckpt = torch.load(ckpt_path, map_location="cpu")
    ckpt_args = ckpt.get("args", {})
    model_name = ckpt_args.get("model", "ViT-B-16")

    # Create the architecture only; weights come from the checkpoint below.
    model, _preprocess_train, preprocess_val = open_clip.create_model_and_transforms(
        model_name, pretrained=None
    )
    tokenizer = open_clip.get_tokenizer(model_name)

    # Weights may be stored at top level or under "state_dict".
    raw_state = ckpt.get("state_dict", ckpt)

    # Strip a DataParallel/DDP "module." prefix only when present.
    cleaned = {
        (k[len("module."):] if k.startswith("module.") else k): v
        for k, v in raw_state.items()
    }

    # Keep only keys that exist in the current model with a matching shape.
    model_sd = model.state_dict()
    filtered, mismatched = {}, []
    for k, v in cleaned.items():
        if k not in model_sd:
            continue
        if tuple(model_sd[k].shape) == tuple(v.shape):
            filtered[k] = v
        else:
            mismatched.append((k, tuple(v.shape), tuple(model_sd[k].shape)))

    # Report everything that will not be loaded so silent drift is visible.
    missing = [k for k in model_sd if k not in filtered]
    unexpected = [k for k in cleaned if k not in model_sd]

    if mismatched:
        print(f"[WARN] {len(mismatched)} keys mismatched in shape (e.g. {mismatched[0]})")
    if unexpected:
        print(f"[WARN] {len(unexpected)} unexpected keys in ckpt (e.g. {unexpected[:3]})")
    if missing:
        print(f"[WARN] {len(missing)} model keys missing in ckpt (e.g. {missing[:3]})")

    # strict=False: load what matches; missing keys keep their init values.
    model.load_state_dict(filtered, strict=False)
    model.to(device)
    model.eval()
    return model, preprocess_val, tokenizer


def list_sources_from_csv(csv_path: str, base_dir: str = None) -> Tuple[List[str], List[str]]:
    """Read visual sources from a CSV and resolve them to filesystem paths.

    Parameters
    ----------
    csv_path : CSV file with a 'Source' column.
    base_dir : optional directory against which relative entries are resolved.

    Returns
    -------
    (raw_sources, resolved_paths): the 'Source' strings exactly as listed,
    and a best-effort resolved path for each one (same order and length).

    Raises
    ------
    ValueError if the CSV has no 'Source' column.
    """
    df = pd.read_csv(csv_path)
    if "Source" not in df.columns:
        raise ValueError("sources_csv must have a 'Source' column")
    raw_sources = df["Source"].astype(str).tolist()

    base = Path(base_dir).resolve() if base_dir else None

    def resolve_one(p: str) -> str:
        # Try increasingly forgiving strategies; return the first hit.
        q = Path(p)
        # 1) Already an absolute path that exists.
        if q.is_absolute() and q.exists():
            return str(q)
        if base:
            # 2) Plain join of base + relative path.
            cand = base / q
            if cand.exists():
                return str(cand)
            # 3) Drop a redundant leading directory such as './dataset/...'.
            parts = q.parts
            if parts and (parts[0] in {".", "dataset", "./dataset"}):
                cand2 = base / Path(*parts[1:])
                if cand2.exists():
                    return str(cand2)
            # 4) Last resort: match by bare filename directly under base.
            cand3 = base / q.name
            if cand3.exists():
                return str(cand3)
            # 5) Nothing exists: return the plain join; downstream code warns.
            return str(cand)
        # No base_dir: return the entry unchanged (Path-normalized).
        return str(q)

    resolved_paths = [resolve_one(p) for p in raw_sources]
    return raw_sources, resolved_paths



def list_sources(data_root: str, sources_txt: str = None, exts=(".pdf", ".png", ".jpg", ".jpeg")) -> List[str]:
    """Collect visual source paths from a text list or by scanning a folder.

    If *sources_txt* is given, each non-blank line (stripped) is taken as one
    path, in file order. Otherwise *data_root* is searched recursively for
    files matching one of *exts* and the results are returned sorted.
    """
    if sources_txt:
        with open(sources_txt, "r", encoding="utf-8") as fh:
            return [line.strip() for line in fh if line.strip()]

    found = []
    root = Path(data_root)
    for suffix in exts:
        found.extend(str(hit) for hit in root.rglob(f"*{suffix}"))
    return sorted(found)


def render_pdf_pages(pdf_path: str, dpi: int = 200, pages: str = "first") -> List[Tuple[str, Image.Image]]:
    """Rasterize a PDF into RGB PIL images via PyMuPDF.

    Parameters
    ----------
    pdf_path : path to the PDF file.
    dpi : render resolution; PDF native resolution is 72 dpi, hence the zoom.
    pages : "first" renders only page 0; any other value renders every page.

    Returns
    -------
    List of (label, image) pairs where the label is "<pdf_path>::page=<K>".

    Raises
    ------
    RuntimeError when PyMuPDF is not installed.
    """
    if not HAVE_FITZ:
        raise RuntimeError("PyMuPDF not installed; pip install pymupdf or set --pdf_pages=skip")
    # Zoom matrix is loop-invariant: 72 dpi is the PDF point size.
    mat = fitz.Matrix(dpi/72.0, dpi/72.0)
    doc = fitz.open(pdf_path)
    out = []
    try:
        page_indices = [0] if pages == "first" else list(range(len(doc)))
        for i in page_indices:
            page = doc.load_page(i)
            pix = page.get_pixmap(matrix=mat, alpha=False)
            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
            label = f"{pdf_path}::page={i}"
            out.append((label, img))
    finally:
        # Always release the document, even if a page fails to render
        # (the original leaked the handle on error).
        doc.close()
    return out


def iter_visual_tensors(paths: List[str], preprocess, pdf_pages: str, pdf_dpi: int, batch_size: int, device: str):
    """Yield (labels, image_tensor_batch) pairs for a list of visual sources.

    PDFs are rendered page-by-page (label "path::page=K", honoring
    *pdf_pages*: "skip" ignores PDFs, "first" renders only page 0); other
    files are opened as images (label = path). Sources that fail to load are
    skipped with a warning. Up to *batch_size* preprocessed tensors are
    stacked, moved to *device*, and yielded; a final partial batch is flushed
    at the end.
    """

    def _items():
        # Yield (label, preprocessed_tensor) for every loadable page/image;
        # failures are reported and skipped, never raised.
        for p in paths:
            if p.lower().endswith(".pdf"):
                if pdf_pages == "skip":
                    continue
                try:
                    for label, pil_img in render_pdf_pages(p, dpi=pdf_dpi, pages=pdf_pages):
                        yield label, preprocess(pil_img.convert("RGB"))
                except Exception as e:
                    print(f"[WARN] Failed to render PDF {p}: {e}")
            else:
                try:
                    tensor = preprocess(Image.open(p).convert("RGB"))
                except Exception as e:
                    print(f"[WARN] Failed to load image {p}: {e}")
                    continue
                yield p, tensor

    # Single batching loop replaces the three duplicated flush sites of the
    # original implementation; observable yields are identical.
    batch_imgs, batch_lbls = [], []
    for label, tensor in _items():
        batch_imgs.append(tensor)
        batch_lbls.append(label)
        if len(batch_imgs) >= batch_size:
            yield batch_lbls, torch.stack(batch_imgs, dim=0).to(device, non_blocking=True)
            batch_imgs, batch_lbls = [], []

    if batch_imgs:
        yield batch_lbls, torch.stack(batch_imgs, dim=0).to(device, non_blocking=True)


def encode_images(paths: Tuple[List[str], List[str]], model, preprocess, device: str,
                  pdf_pages: str = "first", pdf_dpi: int = 200, batch: int = 64):
    """Encode visual sources into L2-normalized CLIP image features.

    Parameters
    ----------
    paths : tuple ``(original_sources, resolved_paths)`` — the raw 'Source'
        strings (kept for output labeling) and the filesystem paths actually
        read. The original annotation (``List[str]``) was wrong.
    model / preprocess / device : OpenCLIP model, its val transform, target device.
    pdf_pages / pdf_dpi / batch : forwarded to iter_visual_tensors.

    Returns
    -------
    (original_sources, labels, feats): labels may include "path::page=K"
    entries for PDFs; feats is an (N, D) fp16 CPU tensor (empty when no
    image could be encoded).
    """
    original_sources, resolved_paths = paths
    labels, feats = [], []

    # Autocast only makes sense on CUDA; enabled=False keeps CPU runs in fp32
    # without the "CUDA is not available" warning the hard-coded context emitted.
    use_amp = device.startswith("cuda")
    with torch.no_grad(), torch.amp.autocast("cuda", enabled=use_amp):
        for lbls, img_t in tqdm(iter_visual_tensors(resolved_paths, preprocess, pdf_pages, pdf_dpi, batch, device),
                                total=None, desc="Encode images", ncols=100):
            f = F.normalize(model.encode_image(img_t), dim=-1)
            labels.extend(lbls)
            feats.append(f.half().cpu())

    if feats:
        feats = torch.cat(feats, dim=0)
    else:
        # No images: return an empty (0, D) tensor; infer D from the text
        # projection when available, otherwise assume ViT-B/16's 512.
        dim = model.text_projection.shape[1] if hasattr(model, 'text_projection') else 512
        feats = torch.empty(0, dim)

    return original_sources, labels, feats


def encode_texts(df: pd.DataFrame, model, tokenizer, device: str, batch: int = 256):
    """Encode candidate captions into L2-normalized CLIP text features.

    Parameters
    ----------
    df : DataFrame with a 'Caption' column (other columns are ignored).
    model / tokenizer / device : OpenCLIP model, its tokenizer, target device.
    batch : captions encoded per forward pass.

    Returns
    -------
    (captions, feats): caption strings in CSV order and an (N, D) fp16 CPU
    tensor (empty 1-D tensor when the CSV has no rows, as before).

    Raises
    ------
    ValueError if the DataFrame lacks a 'Caption' column.
    """
    if "Caption" not in df.columns:
        raise ValueError("candidates_csv must have a 'Caption' column")
    captions = df["Caption"].astype(str).tolist()

    all_feats = []
    # Autocast only on CUDA; disabled on CPU (same effective precision as the
    # previous hard-coded context, minus the runtime warning).
    use_amp = device.startswith("cuda")
    with torch.no_grad(), torch.amp.autocast("cuda", enabled=use_amp):
        for i in tqdm(range(0, len(captions), batch), desc="Encode texts", ncols=100):
            toks = tokenizer(captions[i:i+batch]).to(device)
            f = F.normalize(model.encode_text(toks), dim=-1)
            all_feats.append(f.half().cpu())
    if all_feats:
        all_feats = torch.cat(all_feats, dim=0)
    else:
        all_feats = torch.empty(0,)
    return captions, all_feats


def topn_for_each_image(img_feats: torch.Tensor, txt_feats: torch.Tensor, texts: List[str], topn: int) -> List[List[Dict]]:
    """Rank candidate captions for every image by cosine similarity.

    Both feature tensors are assumed L2-normalized, so the dot product is the
    cosine similarity. Returns, per image, a list of up to *topn* dicts with
    keys rank / score / caption / text_index, best match first. Images (or an
    empty candidate set) with no features yield empty lists.
    """
    if img_feats.numel() == 0 or txt_feats.numel() == 0:
        return [[] for _ in range(img_feats.shape[0])]

    sims = img_feats.float() @ txt_feats.float().T
    k = min(topn, sims.size(1))
    scores, indices = torch.topk(sims, k=k, dim=1)

    results = []
    for img_scores, img_indices in zip(scores.tolist(), indices.tolist()):
        ranked = [
            {
                "rank": rank + 1,
                "score": float(score),
                "caption": texts[j],
                "text_index": j,
            }
            for rank, (score, j) in enumerate(zip(img_scores, img_indices))
        ]
        results.append(ranked)
    return results


def main():
    """CLI entry point: load ckpt, encode sources + captions, dump Top-N JSON."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--ckpt", type=str, required=True)
    # Visual sources (priority: CSV > TXT > data_root scan)
    ap.add_argument("--sources_csv", type=str, default=None, help="CSV with column 'Source' listing visual files")
    ap.add_argument("--sources_txt", type=str, default=None, help="TXT listing visual sources, one per line")
    ap.add_argument("--data_root", type=str, default="./dataset", help="Fallback folder scan if CSV/TXT not provided")

    ap.add_argument("--candidates_csv", type=str, required=True, help="CSV with columns: Caption[, Source]")
    ap.add_argument("--out", type=str, default="./retrieval_topn.json")
    ap.add_argument("--topn", type=int, default=20)
    ap.add_argument("--batch_img", type=int, default=64)
    ap.add_argument("--batch_txt", type=int, default=256)
    ap.add_argument("--pdf_pages", type=str, default="first", choices=["first", "all", "skip"])
    ap.add_argument("--pdf_dpi", type=int, default=200)
    args = ap.parse_args()

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # 1) Model & preprocess
    model, preprocess, tokenizer = load_model_from_ckpt(args.ckpt, device)

    # 2) Visual sources: keep (original_sources, resolved_paths) so the JSON
    #    can report the raw 'Source' strings while files are read from the
    #    resolved locations.
    if args.sources_csv:
        original_sources, resolved_paths = list_sources_from_csv(args.sources_csv, base_dir=args.data_root)
    else:
        resolved_paths = list_sources(args.data_root, args.sources_txt)
        original_sources = resolved_paths  # no CSV: raw and resolved coincide
    paths = (original_sources, resolved_paths)

    print(f"[Info] Loaded {len(resolved_paths)} visual sources")

    # 3) Encode images
    img_original_sources, img_labels, img_feats = encode_images(paths, model, preprocess, device,
                                                                pdf_pages=args.pdf_pages, pdf_dpi=args.pdf_dpi,
                                                                batch=args.batch_img)
    print(f"[Info] Encoded {len(img_labels)} image items (including PDF pages if selected)")

    # 4) Encode candidate texts
    cand_df = pd.read_csv(args.candidates_csv)
    texts, txt_feats = encode_texts(cand_df, model, tokenizer, device, batch=args.batch_txt)
    print(f"[Info] Encoded {len(texts)} candidate captions")

    # 5) Top-N retrieval per encoded image item
    topn_lists = topn_for_each_image(img_feats, txt_feats, texts, args.topn)

    # 6) Dump JSON, keyed by the original 'Source' string. When PDFs expand to
    #    multiple pages (--pdf_pages=all) or some sources fail to load, the
    #    encoded items no longer align 1:1 with the original sources; the old
    #    zip silently truncated/mismatched rows, so fall back to the per-item
    #    labels ("path::page=K") in that case and warn.
    if len(img_original_sources) == len(img_labels):
        image_keys = img_original_sources
    else:
        print(f"[WARN] {len(img_original_sources)} sources vs {len(img_labels)} encoded items; "
              f"using per-item labels as the 'image' field")
        image_keys = img_labels
    out_items = [{"image": key, "topn": lst} for key, lst in zip(image_keys, topn_lists)]

    out_path = Path(args.out)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(out_items, f, ensure_ascii=False, indent=2)
    print(f"[Done] Wrote {len(out_items)} entries to {out_path}")


if __name__ == "__main__":
    main()