#!/usr/bin/env python3
import torch
from tqdm import tqdm
from transformers import GPT2Tokenizer, GPT2Model
import os
import pandas as pd  # pandas is used to read the input CSV

# ========== Path configuration ==========
csv_path = "/home/fang_guotong/projects/3090server/add_suffix.csv"  # input CSV file
tokens_out_path = "/home/fang_guotong/projects/3090server/add_suffix_company_input_ids.pt"
gpt2_embs_out_path = "/home/fang_guotong/projects/3090server/add_suffix_company_gpt2_text_embeddings.pt"

# Prefer GPU 1 (as originally intended), but fall back gracefully so the
# script does not crash on machines with fewer GPUs or CPU-only machines.
if torch.cuda.is_available():
    gpu_index = 1 if torch.cuda.device_count() > 1 else 0
    device = torch.device(f"cuda:{gpu_index}")
else:
    device = torch.device("cpu")

# ========== Initialize GPT-2 model ==========
print("[INFO] Loading GPT-2 tokenizer & text encoder...")
tokenizer = GPT2Tokenizer.from_pretrained("/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e")
text_encoder = GPT2Model.from_pretrained("/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e")
text_encoder.eval().to(device)

# GPT-2 ships without a pad_token; reuse EOS so batched padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# ========== Read the 'new_name' column from the CSV ==========
print(f"[INFO] Loading 'new_name' column from csv: {csv_path}")
df = pd.read_csv(csv_path)
# Drop NaN, coerce to str, strip whitespace, and discard entries that end
# up empty after stripping (covers both "" and whitespace-only values).
captions = [cap for cap in df['new_name'].dropna().astype(str).str.strip() if cap]

# NOTE: the original message said "'name' column" which did not match the
# 'new_name' column actually read above.
print(f"[INFO] Loaded {len(captions)} valid entries from 'new_name' column")

# ========== Tokenize captions ==========
print("[INFO] Tokenizing captions...")
# Pad/truncate every caption to a fixed 64-token length so the batch
# tensors have a uniform shape.
tokenized = tokenizer(
    captions,
    padding="max_length",
    truncation=True,
    max_length=64,
    return_tensors="pt"
)
input_ids = tokenized["input_ids"]
attention_mask = tokenized["attention_mask"]
print(f"[INFO] Tokenized shape: {input_ids.shape}")  # (N, 64) — max_length above is 64, not 32

# Save the token ids for downstream reuse.
torch.save(input_ids, tokens_out_path)
print(f"[INFO] Saved token ids to: {tokens_out_path}")

# ========== Generate GPT-2 text embeddings ==========
print("[INFO] Generating GPT-2 text embeddings...")
batch_size = 256
embs = []

for start in tqdm(range(0, len(captions), batch_size)):
    end = start + batch_size
    ids_batch = input_ids[start:end].to(device)
    mask_batch = attention_mask[start:end].to(device)

    with torch.no_grad():
        hidden = text_encoder(ids_batch, attention_mask=mask_batch).last_hidden_state
        # Mask-aware mean pooling: zero out padded positions, then divide
        # by each caption's count of real (non-padding) tokens.
        weighted_sum = (hidden * mask_batch.unsqueeze(-1)).sum(dim=1)
        pooled = weighted_sum / mask_batch.sum(dim=1, keepdim=True)
        embs.append(pooled.cpu())

embs = torch.cat(embs, dim=0)
print(f"[INFO] GPT-2 embeddings shape: {embs.shape}")  # (N, 768)

# Persist the pooled embeddings.
torch.save(embs, gpt2_embs_out_path)
print(f"[INFO] Saved GPT-2 embeddings to: {gpt2_embs_out_path}")