# ====================================================================================
# File: preprocessing/pre_utils.py
# Description: [V1 new] Helper functions needed by the CIKGRec and CoLaKG
#      preprocessing pipelines.
#      (CIKGRec) text cleaning, LLM prompt generation
#      (CoLaKG)  SBERT embeddings
#      (SDKR)    graph normalization (from src/dataloader.py)
# ====================================================================================

import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import torch
import numpy as np
import scipy.sparse as sp
from tqdm import tqdm
from sentence_transformers import SentenceTransformer

# --- 1. CIKGRec: text cleaning and prompt helpers ---

# NLTK data bootstrap and module-level state.
# Probe for the three NLTK resources clean_text() relies on; if ANY of
# them is missing, (quietly) download all three.
try:
    nltk.data.find('corpora/stopwords')
    nltk.data.find('corpora/wordnet')
    nltk.data.find('tokenizers/punkt')
except LookupError:
    print("[PreUtils] NLTK data (stopwords, wordnet, punkt) not found. Downloading...")
    nltk.download('stopwords', quiet=True)
    nltk.download('wordnet', quiet=True)
    nltk.download('punkt', quiet=True)

# English stopword list and lemmatizer shared by clean_text().
sw = stopwords.words('english')
lemmatizer = WordNetLemmatizer()


def clean_text(text):
    """
    Text-cleaning helper (from CIKGRec/utils.py).

    Lowercases the input, collapses everything but ASCII letters and a few
    punctuation marks to spaces, strips the listed punctuation, removes
    English stopwords, and lemmatizes the surviving words.

    Args:
        text: Arbitrary object; coerced with ``str()`` before cleaning.

    Returns:
        str: Space-joined, lemmatized, stopword-free tokens.
    """
    text = str(text).lower()
    # Keep letters plus ?.!,¿ ; everything else becomes a single space.
    # NOTE: ',' and '¿' are NOT in the punctuation list below, so they
    # survive cleaning — kept as-is to match the upstream behavior.
    text = re.sub(r"[^a-zA-Z?.!,¿]+", " ", text)
    punctuations = '@#!?+&*[]-%.:/();$=><|{}^' + "'`" + '_'
    # One C-level pass instead of a chained .replace() per character.
    text = text.translate(str.maketrans('', '', punctuations))

    # `sw` is a module-level list; a set makes per-word membership O(1).
    # The text is already lowercase, so no per-word .lower() is needed.
    stop_set = set(sw)
    kept = [w for w in text.split() if w not in stop_set]
    return " ".join(lemmatizer.lemmatize(w) for w in kept)


def get_prompt_generate(history, field='item'):
    """
    Build the user prompt for the LLM interest-inference call
    (from CIKGRec/call_llm.py): a bracketed, comma-separated list of titles.

    Args:
        history: Iterable of title strings the user has liked.
        field: Unused in the prompt itself; kept for signature
            compatibility with the upstream code.

    Returns:
        str: ``'[title1, title2, ...]'``
    """
    # The upstream version also computed field.strip('s') here but never
    # used it; that dead local is removed.
    return '[' + ', '.join(history) + ']'


def get_system_generate(field='item'):
    """
    Build the system prompt for the LLM interest-inference call
    (from CIKGRec/call_llm.py, modified to not receive the history).

    Args:
        field: Entity kind, optionally plural (e.g. 'item' or 'items');
            a trailing plural 's' is dropped before interpolation.

    Returns:
        str: System prompt asking for the user's top-5 interests.
    """
    # BUG FIX: the original used field.strip('s'), which strips 's' from
    # BOTH ends ('songs' -> 'ong', 'series' -> 'erie'). Only a trailing
    # plural 's' should be removed.
    f = field[:-1] if field.endswith('s') else field
    return f"You will be provided with a list of {f} titles an anonymous user has liked. Your task is to infer the user's TOP 5 core interests based on this list and your extensive knowledge. List ONLY the top 5 interests, separated by commas. NO descriptions, NO explanations, NO introductory text."


# --- 2. CoLaKG: SBERT embeddings ---

def get_sbert_embeddings(texts, sbert_model_name, device, batch_size=256):
    """
    Encode texts with a Sentence-BERT model
    (logic from CoLaKG/llm_code/get_text_embedding.py).

    Args:
        texts: Sequence of strings to embed.
        sbert_model_name: Model name or local path for SentenceTransformer.
        device: Torch device string, e.g. 'cpu' or 'cuda'.
        batch_size: Encoding batch size passed to ``model.encode``.

    Returns:
        torch.Tensor on CPU, shape ``(len(texts), embedding_dim)``.

    Raises:
        Whatever SentenceTransformer raises on load failure
        (re-raised after printing a hint).
    """
    try:
        model = SentenceTransformer(sbert_model_name, device=device)
    except Exception as e:
        print(f"[PreUtils] 无法加载 SBERT 模型 '{sbert_model_name}'. 错误: {e}")
        print("[PreUtils] 请确保网络连接正常或模型已本地缓存。")
        # Bare `raise` re-raises with the original traceback intact
        # (the original `raise e` appended an extra frame).
        raise

    print(f"[PreUtils] SBERT 模型 {sbert_model_name} 加载成功。")

    # model.encode handles batching and the progress bar itself; the dead
    # `all_embeddings = []` accumulator from the original is removed.
    all_embeddings = model.encode(
        texts,
        batch_size=batch_size,
        show_progress_bar=True,
        convert_to_tensor=True,
        device=device
    )

    print(f"[PreUtils] 文本嵌入生成完毕。 Shape: {all_embeddings.shape}")
    return all_embeddings.cpu()  # move to CPU so the result can be saved


# --- 3. SDKR: graph normalization (from src/dataloader.py) ---

def get_sparse_adj(adj_matrix, device):
    """
    Symmetrically normalize an adjacency matrix (LightGCN style) and
    return it as a PyTorch sparse COO tensor on CPU.

    Computes ``D^-1/2 (A + I) D^-1/2`` where ``D`` is the degree matrix
    of ``A + I`` (self-loops added, matching the original's intent).

    Args:
        adj_matrix: Square scipy sparse matrix, the raw adjacency ``A``.
        device: Torch device the tensor is built on; the result is always
            moved back to CPU for saving.

    Returns:
        torch.Tensor: sparse COO, float32, on CPU.

    Fixes vs. the original implementation:
      * The original did ``mm(A, D^-1/2)`` then ``mm(., D^-1/2)``, which
        equals ``A * D^-1`` (random-walk normalization) — NOT the
        symmetric ``D^-1/2 * A * D^-1/2`` its comment claimed.
      * ``torch.sparse.mm(sparse, dense)`` returned a DENSE N x N matrix,
        defeating the purpose of a sparse adjacency and risking OOM.
      * A dead degree computation before adding self-loops, and the
        deprecated ``torch.sparse.FloatTensor`` constructor, are removed.
      * Here each edge value a_ij is scaled by d_i^-1/2 * d_j^-1/2
        directly — exact, fully sparse, O(nnz) memory.
    """
    print("[PreUtils] 开始进行 LightGCN 图归一化...")

    # A + I in scipy COO form.
    n = adj_matrix.shape[0]
    adj = (adj_matrix + sp.eye(n)).tocoo()

    # Degree of A + I; d^-1/2 with isolated nodes mapped to 0.
    row_sum = np.asarray(adj.sum(axis=1)).flatten()
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(row_sum, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.

    # Per-edge symmetric scaling: v_ij <- d_i^-1/2 * v_ij * d_j^-1/2.
    values = d_inv_sqrt[adj.row] * adj.data * d_inv_sqrt[adj.col]

    indices = torch.LongTensor(np.vstack((adj.row, adj.col)))
    norm_adj = torch.sparse_coo_tensor(
        indices, torch.FloatTensor(values), adj.shape, device=device
    ).coalesce()

    print("[PreUtils] 图归一化完成。")
    return norm_adj.cpu()  # move to CPU so the result can be saved