# s2_clip_ft.py
# -*- coding: utf-8 -*-
import argparse, math
from pathlib import Path

import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader, Subset

import pandas as pd
from tqdm import tqdm
import open_clip

from s1_get_train_data import build_loader, build_image_eval_transform, PairsDataset

# ---- Pillow: handle very large images and palette-transparency warnings ----
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# To fully disable the decompression-bomb guard set this to None; raising the threshold (as below) is safer.
Image.MAX_IMAGE_PIXELS = 200_000_000

# ---- Minor performance tweak ----
torch.backends.cudnn.benchmark = True


def set_seed(s=42):
    """Seed random, NumPy and torch (CPU and all CUDA devices) for reproducibility."""
    import random
    import numpy as np
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(s)


def clip_loss(img_feats, txt_feats, logit_scale):
    """Symmetric InfoNCE loss over the in-batch image/text similarity matrix.

    Both feature sets are L2-normalized, scaled cosine similarities serve as
    logits, and the matching pairs lie on the diagonal. The returned value is
    the mean of the image-to-text and text-to-image cross-entropy losses.
    """
    i_norm = F.normalize(img_feats, dim=-1)
    t_norm = F.normalize(txt_feats, dim=-1)
    sim = logit_scale * (i_norm @ t_norm.t())
    targets = torch.arange(sim.size(0), device=sim.device)
    loss_i2t = F.cross_entropy(sim, targets)
    loss_t2i = F.cross_entropy(sim.t(), targets)
    return 0.5 * (loss_i2t + loss_t2i)


def enable_params(params, flag=True):
    """Set ``requires_grad`` to ``flag`` on every parameter in ``params``."""
    for param in params:
        param.requires_grad = flag


def unfreeze_last_blocks(model, n_vit=1, n_txt=1):
    """Unfreeze the tail of both CLIP towers for fine-tuning.

    Makes trainable: the last ``n_vit`` visual transformer blocks plus the
    visual post-norm and projection, and the last ``n_txt`` text transformer
    blocks plus the final LayerNorm and text projection. Components the model
    does not expose are silently skipped.
    """
    visual = model.visual
    # --- vision tower (ViT layout: resblocks -> ln_post -> proj) ---
    if hasattr(visual, "transformer"):
        for block in visual.transformer.resblocks[-n_vit:]:
            enable_params(block.parameters(), True)
    if hasattr(visual, "ln_post"):
        enable_params(visual.ln_post.parameters(), True)
    proj = getattr(visual, "proj", None)
    if isinstance(proj, torch.nn.Parameter):
        proj.requires_grad = True
    # --- text tower (resblocks -> ln_final -> text_projection) ---
    if hasattr(model, "transformer"):
        for block in model.transformer.resblocks[-n_txt:]:
            enable_params(block.parameters(), True)
    if hasattr(model, "ln_final"):
        enable_params(model.ln_final.parameters(), True)
    text_proj = getattr(model, "text_projection", None)
    if isinstance(text_proj, torch.nn.Parameter):
        text_proj.requires_grad = True


def freeze_all(model):
    """Disable gradient computation for every parameter of ``model``."""
    for param in model.parameters():
        param.requires_grad = False


def train_stage(dloader, model, tokenizer, opt, scaler, device, max_steps=None, grad_accum=1):
    """Run one training pass over ``dloader`` with AMP and gradient accumulation.

    Args:
        dloader: yields ``(images, captions, index)`` batches.
        model: CLIP-style model exposing ``encode_image``/``encode_text`` and ``logit_scale``.
        tokenizer: maps a list of caption strings to a token tensor.
        opt: optimizer over the currently trainable parameters.
        scaler: ``torch.amp.GradScaler`` for mixed-precision updates.
        device: target device for the batches.
        max_steps: optional early stop after this many batches.
        grad_accum: number of batches to accumulate before each optimizer step.

    Returns:
        Mean (unscaled) loss over the processed batches.
    """
    model.train()
    step = 0
    running = 0.0
    for images, captions, _ in tqdm(dloader, ncols=100):
        # Inputs are converted to RGB upstream, avoiding transform differences
        # caused by the Pillow Palette+Transparency warning.
        images = images.to(device, non_blocking=True)
        tokens = tokenizer(list(captions)).to(device)
        with torch.amp.autocast('cuda'):
            img_f = model.encode_image(images)
            txt_f = model.encode_text(tokens)
            loss = clip_loss(img_f, txt_f, model.logit_scale.exp())
        # Scale the loss down so accumulated gradients match one large batch.
        loss = loss / grad_accum
        scaler.scale(loss).backward()
        if (step + 1) % grad_accum == 0:
            # Unscale before clipping so the norm threshold applies to true gradients.
            scaler.unscale_(opt)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            scaler.step(opt); scaler.update(); opt.zero_grad(set_to_none=True)
        # Multiply back by grad_accum so the logged value is the unscaled loss.
        running += loss.item() * grad_accum
        step += 1
        if max_steps and step >= max_steps: break
    # NOTE(review): if the number of batches is not a multiple of grad_accum,
    # the trailing partial accumulation never triggers an optimizer step and
    # its gradients are left pending — confirm this is acceptable.
    return running / max(1, step)


@torch.no_grad()
def evaluate(val_loader, model, tokenizer, device):
    """Encode all validation pairs and report cross-modal retrieval recalls.

    Assumes ``val_loader`` is not shuffled, so image i and caption i form the
    ground-truth pair (the diagonal of the similarity matrix).

    Args:
        val_loader: yields ``(images, captions, index)`` batches.
        model: CLIP-style model exposing ``encode_image``/``encode_text``.
        tokenizer: maps a list of caption strings to a token tensor.
        device: device the batches are moved to.

    Returns:
        Dict with I2T/T2I Recall@{1,5,10} and their mean ("mR").
    """
    model.eval()
    img_bank, txt_bank = [], []
    idx_order = []  # original dataset indices, kept in encoding order
    for images, captions, idx in tqdm(val_loader, ncols=100, desc="Encode val"):
        images = images.to(device, non_blocking=True)
        tokens = tokenizer(list(captions)).to(device)
        with torch.amp.autocast('cuda'):
            img_f = F.normalize(model.encode_image(images), dim=-1)
            txt_f = F.normalize(model.encode_text(tokens), dim=-1)
        img_bank.append(img_f); txt_bank.append(txt_f); idx_order += idx.tolist()
    I = torch.cat(img_bank, dim=0)
    T = torch.cat(txt_bank, dim=0)
    sims = I @ T.t()  # cosine similarities; row i = image i vs all captions

    def recall_at_k(sim, k):
        # Clamp k: with fewer than k validation pairs, topk(k) would raise.
        k = min(k, sim.size(1))
        targets = torch.arange(sim.size(0), device=sim.device)
        _, topk = sim.topk(k, dim=1)
        hits = (topk == targets[:, None]).any(dim=1).float().mean().item()
        return hits

    r1 = recall_at_k(sims, 1); r5 = recall_at_k(sims, 5); r10 = recall_at_k(sims, 10)
    r1_t = recall_at_k(sims.t(), 1); r5_t = recall_at_k(sims.t(), 5); r10_t = recall_at_k(sims.t(), 10)
    mR = (r1+r5+r10+r1_t+r5_t+r10_t)/6.0
    return {"I2T_R@1": r1, "I2T_R@5": r5, "I2T_R@10": r10,
            "T2I_R@1": r1_t, "T2I_R@5": r5_t, "T2I_R@10": r10_t, "mR": mR}


def main():
    """CLI entry: two-stage CLIP fine-tuning with retrieval-based model selection.

    Stage A trains only the head parameters (projections, final norms,
    logit_scale) with both towers frozen. Stage B additionally unfreezes the
    last N blocks of each tower and trains with per-group warmup + cosine LR,
    checkpointing whenever mean recall (mR) on the validation split improves.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--csv", type=str, required=True, help="CSV with image_path,caption")
    ap.add_argument("--image_root", type=str, default="")
    ap.add_argument("--model", type=str, default="ViT-B-16")
    ap.add_argument("--pretrained", type=str, default="laion2b_s34b_b88k")
    ap.add_argument("--img_size", type=int, default=224)
    ap.add_argument("--batch", type=int, default=64)
    ap.add_argument("--epochs_a", type=int, default=5)
    ap.add_argument("--epochs_b", type=int, default=10)
    ap.add_argument("--unfreeze_vit", type=int, default=1, help="unfreeze last N visual blocks")
    ap.add_argument("--unfreeze_txt", type=int, default=1, help="unfreeze last N text blocks")
    ap.add_argument("--lr_head_a", type=float, default=1e-3)
    ap.add_argument("--lr_head_b", type=float, default=5e-5)
    ap.add_argument("--lr_backbone_b", type=float, default=5e-6)
    ap.add_argument("--wd", type=float, default=0.01)
    ap.add_argument("--accum", type=int, default=2, help="grad accumulation to reach effective BS")
    ap.add_argument("--val_split", type=float, default=0.2)
    ap.add_argument("--seed", type=int, default=42)
    ap.add_argument("--out", type=str, default="ckpt_clip_ft.pt")
    args = ap.parse_args()

    set_seed(args.seed)
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # ---- data ----
    # Sequential (not shuffled) split: first (1 - val_split) rows train, rest validate.
    full_df = pd.read_csv(args.csv)
    n = len(full_df); idx = list(range(n))
    cut = int(n * (1 - args.val_split))
    train_idx, val_idx = idx[:cut], idx[cut:]
    train_csv = Path(args.csv)

    # NOTE(review): train_idx is computed but never used — build_loader receives
    # the full CSV, so the validation rows are also seen during training
    # (leakage into the retrieval metrics). Consider restricting the train
    # loader to train_idx; confirm build_loader's interface in s1_get_train_data.
    train_loader, _ = build_loader(train_csv, args.image_root, batch_size=args.batch,
                                   train=True, img_size=args.img_size)
    val_ds = PairsDataset(train_csv, args.image_root, train=False, img_size=args.img_size)
    val_ds = Subset(val_ds, val_idx)
    val_loader = DataLoader(val_ds, batch_size=args.batch, shuffle=False, num_workers=4, pin_memory=True)

    # ---- model & tokenizer ----
    # NOTE(review): preprocess_train/preprocess_val are unused — presumably
    # PairsDataset applies its own transforms; verify they match the model's.
    model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(
        args.model, pretrained=args.pretrained)
    tokenizer = open_clip.get_tokenizer(args.model)
    model.to(device)

    # ================== Stage A ==================
    # Head-only warm start: enable just the projections / final norms by
    # substring match on parameter names, plus the logit_scale temperature.
    freeze_all(model)
    trainable = []
    for n_, p in model.named_parameters():
        if any(k in n_ for k in ["text_projection", "ln_final", "visual.ln_post", "visual.proj"]):
            p.requires_grad = True; trainable.append(p)
    model.logit_scale.requires_grad = True
    trainable.append(model.logit_scale)
    # NOTE(review): logit_scale is trained but never clamped; CLIP commonly
    # clamps it (e.g. to ln(100)) for stability — confirm whether that's wanted.

    opt = torch.optim.AdamW(trainable, lr=args.lr_head_a, weight_decay=args.wd, betas=(0.9, 0.98))
    scaler = torch.amp.GradScaler('cuda')

    for ep in range(args.epochs_a):
        loss = train_stage(train_loader, model, tokenizer, opt, scaler, device, grad_accum=args.accum)
        metrics = evaluate(val_loader, model, tokenizer, device)
        print(f"[StageA][Epoch {ep+1}] loss={loss:.4f} mR={metrics['mR']:.4f} "
              f"R@1={metrics['I2T_R@1']:.3f}/{metrics['T2I_R@1']:.3f}")

    # ================== Stage B ==================
    # Re-freeze, then re-enable the heads and the last N blocks of each tower.
    freeze_all(model)
    trainable_head = []
    for n_, p in model.named_parameters():
        if any(k in n_ for k in ["text_projection", "ln_final", "visual.ln_post", "visual.proj"]):
            p.requires_grad = True; trainable_head.append(p)
    model.logit_scale.requires_grad = True
    trainable_head.append(model.logit_scale)

    unfreeze_last_blocks(model, n_vit=args.unfreeze_vit, n_txt=args.unfreeze_txt)

    # Split the trainable set into head vs backbone for per-group learning rates.
    backbone_params, head_params = [], []
    for n_, p in model.named_parameters():
        if not p.requires_grad: continue
        (head_params if any(k in n_ for k in ["text_projection", "ln_final", "visual.ln_post", "visual.proj"])
         else backbone_params).append(p)

    opt = torch.optim.AdamW([
        {"params": head_params, "lr": args.lr_head_b},
        {"params": backbone_params, "lr": args.lr_backbone_b},
    ], weight_decay=args.wd, betas=(0.9, 0.98))
    # Keep direct references to the param groups so the per-step LR update
    # below can identify them by identity (tensor membership tests are ambiguous).
    head_group, backbone_group = opt.param_groups

    scaler = torch.amp.GradScaler('cuda')

    best_mR, best_state = -1.0, None
    total_steps = args.epochs_b * max(1, math.ceil(len(train_loader)))
    warmup = min(1000, total_steps // 10)

    def cosine_lr(step, base_lr):
        # Cosine decay from base_lr to 0 over the post-warmup steps.
        t = (step - warmup) / max(1, total_steps - warmup)
        t = min(max(t, 0.0), 1.0)
        return 0.5 * (1 + math.cos(math.pi * t)) * base_lr

    step_count = 0
    for ep in range(args.epochs_b):
        model.train()
        running = 0.0
        for images, captions, _ in tqdm(train_loader, ncols=100, desc=f"StageB Ep{ep+1}"):

            # ---- set both param groups' learning rates (linear warmup + cosine) ----
            for g in opt.param_groups:
                base = args.lr_head_b if g is head_group else args.lr_backbone_b
                if step_count < warmup:
                    g["lr"] = base * (step_count / max(1, warmup))
                else:
                    g["lr"] = cosine_lr(step_count, base)

            images = images.to(device, non_blocking=True)
            tokens = tokenizer(list(captions)).to(device)
            with torch.amp.autocast('cuda'):
                img_f = model.encode_image(images)
                txt_f = model.encode_text(tokens)
                loss = clip_loss(img_f, txt_f, model.logit_scale.exp())
            # Gradient accumulation: scale the loss down and only step the
            # optimizer every args.accum batches. step_count is global, so an
            # accumulation window may span an epoch boundary.
            loss = loss / args.accum
            scaler.scale(loss).backward()
            if (step_count + 1) % args.accum == 0:
                # Unscale before clipping so the norm threshold applies to true gradients.
                scaler.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                scaler.step(opt); scaler.update(); opt.zero_grad(set_to_none=True)
            running += loss.item() * args.accum
            step_count += 1

        metrics = evaluate(val_loader, model, tokenizer, device)
        print(f"[StageB][Epoch {ep+1}] loss={running/max(1, len(train_loader)):.4f} mR={metrics['mR']:.4f} "
              f"I2T@1/5/10={metrics['I2T_R@1']:.3f}/{metrics['I2T_R@5']:.3f}/{metrics['I2T_R@10']:.3f} "
              f"T2I@1/5/10={metrics['T2I_R@1']:.3f}/{metrics['T2I_R@5']:.3f}/{metrics['T2I_R@10']:.3f}")

        # Checkpoint on best mean recall; state_dict tensors are moved to CPU
        # so the saved file loads on any device.
        if metrics["mR"] > best_mR:
            best_mR = metrics["mR"]
            best_state = {k: v.cpu() for k, v in model.state_dict().items()}
            torch.save({"state_dict": best_state, "args": vars(args), "metrics": metrics}, args.out)
            print(f"==> saved to {args.out}")


# Script entry point: parse CLI args and run both fine-tuning stages.
if __name__ == "__main__":
    main()
