#!/usr/bin/env python3
import torch
from tqdm import tqdm
from transformers import GPT2Tokenizer, GPT2Model
import os

# ========== Config paths ==========
# Input: one caption per line, UTF-8 text.
captions_path = "/home/fang_guotong/projects/3090server/captions.txt"
# Output: padded/truncated GPT-2 token ids, shape (N, 32), saved via torch.save.
tokens_out_path = "/home/fang_guotong/projects/3090server/input_ids.pt"
# Output: mean-pooled GPT-2 hidden states, shape (N, 768), saved via torch.save.
gpt2_embs_out_path = "/home/fang_guotong/projects/3090server/gpt2_text_embeddings.pt"

# ========== Initialize GPT-2 model ==========
# Loads the tokenizer and the bare GPT2Model (hidden states only, no LM head)
# from a local HuggingFace cache snapshot.
print("[INFO] Loading GPT-2 tokenizer & text encoder...")
tokenizer = GPT2Tokenizer.from_pretrained("/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e")
text_encoder = GPT2Model.from_pretrained("/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e")
# Use the GPU when one is present instead of hard-coding .cuda(), so the
# script no longer crashes on CPU-only machines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
text_encoder.eval().to(device)

# GPT-2 ships without a pad token; reuse EOS for padding.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# ========== Read captions ==========
# One caption per line; whitespace-only and empty lines are discarded.
print(f"[INFO] Loading captions from: {captions_path}")
with open(captions_path, "r", encoding="utf-8") as fh:
    captions = [text for text in (raw.strip() for raw in fh) if text]

print(f"[INFO] Loaded {len(captions)} captions")

# ========== Tokenize captions ==========
# Pad/truncate every caption to exactly 32 tokens and keep the attention
# mask so padded positions can be excluded from pooling later.
print("[INFO] Tokenizing captions...")
encoded = tokenizer(
    captions,
    padding="max_length",
    truncation=True,
    max_length=32,
    return_tensors="pt",
)
input_ids, attention_mask = encoded["input_ids"], encoded["attention_mask"]
print(f"[INFO] Tokenized shape: {input_ids.shape}")  # (N, 32)

# Persist the token ids
torch.save(input_ids, tokens_out_path)
print(f"[INFO] Saved token ids to: {tokens_out_path}")

# ========== Generate GPT-2 text embeddings ==========
# Masked mean pooling of the last hidden state over the sequence dimension,
# batched to bound peak memory, results collected on CPU.
print("[INFO] Generating GPT-2 text embeddings...")
embs = []
batch_size = 256
# Run batches on whichever device the encoder lives on, instead of
# hard-coding .cuda() — this keeps the script usable on CPU-only machines.
device = next(text_encoder.parameters()).device

# Iterate over the tensor actually being sliced (same N as len(captions),
# but self-consistent even if the caption list is reused/changed upstream).
for i in tqdm(range(0, input_ids.size(0), batch_size)):
    batch_ids = input_ids[i:i + batch_size].to(device)
    batch_mask = attention_mask[i:i + batch_size].to(device)

    with torch.no_grad():
        outputs = text_encoder(batch_ids, attention_mask=batch_mask)
        # outputs.last_hidden_state: (B, T, D)
        # Mean-pool only over real (non-pad) tokens; clamp guards the
        # division against an (unexpected) all-padding row.
        summed = (outputs.last_hidden_state * batch_mask.unsqueeze(-1)).sum(dim=1)
        counts = batch_mask.sum(dim=1, keepdim=True).clamp(min=1)
        embs.append((summed / counts).cpu())

embs = torch.cat(embs, dim=0)
print(f"[INFO] GPT-2 embeddings shape: {embs.shape}")  # (N, 768)

# Save embeddings
torch.save(embs, gpt2_embs_out_path)
print(f"[INFO] Saved GPT-2 embeddings to: {gpt2_embs_out_path}")
