# kg_to_bert.py
import torch
from transformers import BertTokenizer, BertModel
from transformers import RobertaTokenizer, RobertaModel

# Select CUDA when a GPU is available, otherwise fall back to CPU.
# All model placement and tensor movement below uses this module-level device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

def _choose_tokenizer_and_model_by_path(model_path):
    """
    Pick a (tokenizer, model) pair for `model_path` by inspecting the path string.

    Priority rules:
      - path contains 'codebert' or 'roberta' -> try Roberta first, then Bert
      - otherwise (including plain 'bert' paths) -> try Bert first, then Roberta

    Each loader family is attempted at most once.  (The previous version could
    retry Bert a second time after it had already failed, contradicting its own
    "falling back to Roberta" log message.)

    Args:
        model_path: local path or hub id of a pretrained Bert/Roberta model.

    Returns:
        (tokenizer, model, model_type_str) where model_type_str is 'bert' or
        'roberta'; the model is moved to the module-level `device`.

    Raises:
        RuntimeError: if neither loader family can load `model_path`.
    """
    lower = model_path.lower()
    loaders = {
        'bert': (BertTokenizer, BertModel),
        'roberta': (RobertaTokenizer, RobertaModel),
    }
    # NOTE: 'roberta' and 'codebert' both contain the substring 'bert', so the
    # explicit Roberta keyword check must run first to get Roberta-first order.
    if 'codebert' in lower or 'roberta' in lower:
        order = ['roberta', 'bert']
    else:
        # Explicit 'bert' paths and unknown paths both try Bert first.
        order = ['bert', 'roberta']

    last_error = None
    for model_type in order:
        tokenizer_cls, model_cls = loaders[model_type]
        try:
            tok = tokenizer_cls.from_pretrained(model_path)
            mdl = model_cls.from_pretrained(model_path).to(device)
            return tok, mdl, model_type
        except Exception as e:
            # Loading can fail for many transformers-internal reasons; remember
            # the error and fall through to the next candidate.
            last_error = e
            print(f"尝试以 {model_type} 加载失败: {e}，回退尝试下一种加载方式。")

    raise RuntimeError(f"既无法以 Bert 也无法以 Roberta 加载模型: {last_error}")

def _split_text_into_chunks(tokenizer, text, max_seq_length):
    """Greedily pack the newline-separated lines of `text` into chunks.

    Each chunk's estimated token count (via tokenizer.tokenize, which adds no
    special tokens) stays below max_seq_length - 2, reserving two slots for the
    special tokens ([CLS]/[SEP] resp. <s>/</s>).  A single line longer than the
    budget becomes its own chunk and is truncated later by the tokenizer.
    Note: an empty `text` still yields one (empty) chunk, matching the
    original behavior.
    """
    chunks = []
    current_chunk = []
    current_length = 0
    for sentence in text.split('\n'):
        tlen = len(tokenizer.tokenize(sentence))
        if current_length + tlen < max_seq_length - 2:
            current_chunk.append(sentence)
            current_length += tlen
        else:
            if current_chunk:
                chunks.append(" ".join(current_chunk))
            # Start a new chunk with the current sentence.
            current_chunk = [sentence]
            current_length = tlen
    if current_chunk:
        chunks.append(" ".join(current_chunk))
    return chunks


def _embed_chunk(tokenizer, model, chunk, max_seq_length):
    """Tokenize one chunk, run the model, and return the summary vector on CPU.

    Token 0 is [CLS] for BERT and <s> for RoBERTa; both serve as a sequence
    summary.  Returns a tensor of shape (1, hidden_size).
    """
    inputs = tokenizer(
        chunk,
        return_tensors="pt",
        max_length=max_seq_length,
        truncation=True,
        padding="max_length"
    )
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state[:, 0, :].cpu()


def process_with_bert(model_path, text):
    """
    Embed `text` with a Bert/Roberta model chosen from `model_path` (GPU support).

    Pipeline:
      1. load tokenizer/model via _choose_tokenizer_and_model_by_path
      2. split the text into chunks that fit the model's sequence limit
      3. embed each chunk and average the per-chunk [CLS]/<s> vectors
      4. L2-normalize the mean vector

    Args:
        model_path: path or hub id of the pretrained Bert/Roberta model.
        text: possibly long, newline-separated input text.

    Returns:
        torch.Tensor of shape (hidden_size,) on CPU, or None when no chunks
        were produced.
    """
    tokenizer, model, _model_type = _choose_tokenizer_and_model_by_path(model_path)
    model.eval()

    # Honor the model's real positional capacity, capped at the customary 512.
    model_max_len = getattr(model.config, "max_position_embeddings", 512)
    max_seq_length = min(512, model_max_len)

    # Align the tokenizer's own limit so its truncation matches ours; some
    # tokenizers may not allow this attribute to be set.
    try:
        tokenizer.model_max_length = max_seq_length
    except Exception:
        pass

    chunks = _split_text_into_chunks(tokenizer, text, max_seq_length)

    # Since max_seq_length <= model_max_len and the tokenizer truncates each
    # chunk to max_seq_length, sequences can never exceed the model's
    # position-embedding limit — no post-hoc truncation is needed.
    all_outputs = [
        _embed_chunk(tokenizer, model, chunk, max_seq_length) for chunk in chunks
    ]

    if not all_outputs:
        return None

    # Average the chunk summaries, then L2-normalize; the epsilon guards
    # against numerical issues for tiny norms.
    combined = torch.cat(all_outputs, dim=0).mean(dim=0)  # (hidden_size,)
    norm = combined.norm(p=2)
    if norm.item() > 0:
        combined = combined / (norm + 1e-12)

    return combined  # torch.Tensor, shape (hidden_size,)

# Public entry point: embed knowledge-graph text with a BERT-family model.
def process_graph_text_with_bert(model_path, graph_text):
    """
    Encode knowledge-graph text and return the pooled BERT output (GPU support).

    Args:
        model_path: path to the BERT/Roberta model
        graph_text: knowledge-graph text to embed

    Returns:
        bert_output: the pooled embedding (torch.Tensor)
    """
    bert_output = process_with_bert(model_path, graph_text)
    return bert_output
