from sklearn.metrics.pairwise import cosine_similarity
import torch
from transformers import BertTokenizer, BertModel

# Load the tokenizer and model (downloaded automatically on first use).
tokenizer = BertTokenizer.from_pretrained('hfl/chinese-roberta-wwm-ext')
model = BertModel.from_pretrained('hfl/chinese-roberta-wwm-ext')
# Switch to inference mode: disables dropout so embeddings are deterministic
# across repeated calls with the same input.
model.eval()

def get_bert_embedding(text):
    """Return a sentence embedding for *text*.

    The embedding is the mean of the final-layer hidden states over the
    real (non-padding) tokens, as a 1-D tensor of size ``hidden_dim``.

    Args:
        text: Input string; truncated to 512 tokens.

    Returns:
        torch.Tensor of shape (hidden_dim,).
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    # Mask-aware mean pooling: average only over real tokens so padding
    # positions (if any) do not dilute the embedding. For a single text
    # there is no padding and this equals a plain mean over dim 1.
    hidden = outputs.last_hidden_state                      # (1, seq_len, hidden_dim)
    mask = inputs["attention_mask"].unsqueeze(-1).float()   # (1, seq_len, 1)
    summed = (hidden * mask).sum(dim=1)
    # clamp guards against division by zero for a degenerate all-pad row
    count = mask.sum(dim=1).clamp(min=1e-9)
    return (summed / count).squeeze()

def compute_bert_similarity(text1, text2):
    """Return the cosine similarity between the BERT embeddings of two texts.

    Args:
        text1: First input string.
        text2: Second input string.

    Returns:
        float in [-1.0, 1.0] (typically near [0, 1] for BERT embeddings).
    """
    embedding1 = get_bert_embedding(text1)
    embedding2 = get_bert_embedding(text2)
    # Compute cosine similarity directly in torch: avoids the numpy/sklearn
    # round-trip for a single vector pair and returns a plain Python float
    # instead of a numpy.float32.
    similarity = torch.nn.functional.cosine_similarity(
        embedding1.unsqueeze(0), embedding2.unsqueeze(0)
    )
    return similarity.item()