# Minimal example: fine-tune CLIP on (image, Chinese caption) pairs with a contrastive loss.
import torch
from PIL import Image
from torch.utils.data import DataLoader
from transformers import AdamW, CLIPModel, CLIPProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# Checkpoint id used for both the model and its processor — keep it in one place
# so the two can never drift apart.
CHECKPOINT = "openai/clip-vit-base-patch32"

# Load the pretrained CLIP model and its paired processor (tokenizer + image transforms).
model = CLIPModel.from_pretrained(CHECKPOINT).to(device)
processor = CLIPProcessor.from_pretrained(CHECKPOINT)

# Toy training data: one caption per image, aligned by index.
# NOTE(review): this checkpoint's tokenizer/text encoder was trained on English
# byte-BPE; Chinese captions will tokenize but may train poorly — consider a
# multilingual CLIP variant. TODO confirm this is intended.
train_texts = ["熊猫", "大熊猫", "黑白熊猫"]  # Chinese captions
# Image file paths, parallel to train_texts (same length, same order).
train_images = ["panda1.jpg", "panda2.jpg", "panda3.jpg"]


def collate_fn(batch):
    """Collate (image_path, caption) pairs into CLIP-ready tensor batches.

    Args:
        batch: list of (image_path, text) tuples produced by the dataset.

    Returns:
        dict with "image_inputs" (pixel values) and "text_inputs"
        (input ids + attention mask), both already moved to ``device``.
    """
    image_paths, texts = zip(*batch)
    images = [Image.open(p).convert("RGB") for p in image_paths]
    image_inputs = processor(images=images, return_tensors="pt").to(device)
    # padding/truncation are required: captions in one batch tokenize to
    # different lengths, and return_tensors="pt" cannot stack ragged rows.
    text_inputs = processor(
        text=list(texts), padding=True, truncation=True, return_tensors="pt"
    ).to(device)
    return {"image_inputs": image_inputs, "text_inputs": text_inputs}


# 创建数据集和数据加载器
dataset = list(zip(train_images, train_texts))
train_dataloader = DataLoader(
    dataset, batch_size=2, shuffle=True, collate_fn=collate_fn
)

# 定义优化器和损失函数（CLIP使用对比损失）
optimizer = AdamW(model.parameters(), lr=5e-5)
loss_fn = torch.nn.CosineEmbeddingLoss()  # 或使用CLIP原生的NT-Xent损失

# Fine-tuning loop (simplified); 3-5 epochs is usually enough for a toy set.
for epoch in range(3):
    model.train()
    for batch in train_dataloader:
        # (batch_size, dim) embeddings from the two encoders.
        image_emb = model.get_image_features(**batch["image_inputs"])
        text_emb = model.get_text_features(**batch["text_inputs"])

        # CLIP compares L2-normalized embeddings; without normalization the
        # dot products are unbounded and the temperature is meaningless.
        image_emb = torch.nn.functional.normalize(image_emb, dim=-1)
        text_emb = torch.nn.functional.normalize(text_emb, dim=-1)

        # Similarity matrix (batch_size x batch_size). 0.07 is CLIP's initial
        # temperature; the pretrained model also carries a learned scale in
        # model.logit_scale that could be used instead.
        logits = (image_emb @ text_emb.T) / 0.07

        # Contrastive targets: the i-th image matches the i-th text, so the
        # positive class index for row i is i (the diagonal).
        labels = torch.arange(logits.size(0), device=logits.device)
        # Symmetric InfoNCE: classify the right text for each image AND the
        # right image for each text. (The original called
        # CosineEmbeddingLoss(logits, labels), whose (x1, x2, target=±1)
        # signature makes that call invalid.)
        loss = (
            torch.nn.functional.cross_entropy(logits, labels)
            + torch.nn.functional.cross_entropy(logits.T, labels)
        ) / 2

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
