import os.path

import torch
from transformers import AutoModel, AutoTokenizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

def calculate_bert_similarity(model_path, text1, text2):
    """Compute cosine similarity between the BERT [CLS] embeddings of two texts.

    Args:
        model_path: Local directory or Hugging Face hub id of a
            BERT-compatible checkpoint (e.g. "simbert-base-chinese").
        text1: First input sentence.
        text2: Second input sentence.

    Returns:
        float: Cosine similarity of the two [CLS] embeddings, in [-1.0, 1.0].
    """
    # Load tokenizer and model. from_pretrained() already puts the model in
    # eval mode, but make it explicit so dropout is never active here.
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path)
    model.eval()
    # NOTE(review): the previous version torch.load()-ed
    # "<model_path>/pytorch_model.bin" just to print parameter names; that
    # crashes for hub ids and safetensors checkpoints, so it was removed.
    # Use model.named_parameters() if you need to inspect weight names.

    # Tokenize both sentences as one padded batch.
    inputs = tokenizer([text1, text2], padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        # [CLS] token embedding per sentence: shape (2, hidden_size).
        embeddings = outputs.last_hidden_state[:, 0, :].numpy()

    # sklearn's cosine_similarity consumes 2-D numpy arrays directly — no
    # need to round-trip through torch tensors. Slicing with 0:1 / 1:2 keeps
    # each row 2-D as the API requires.
    similarity = cosine_similarity(embeddings[0:1], embeddings[1:2])
    return float(similarity[0, 0])

# Example sentences (Chinese): "When is the Huabei repayment due?" / "Alipay".
text1 = "花呗什么时候还款"
text2 = "支付宝"

# Model path — a pretrained model that can be fine-tuned for downstream tasks
# (classification, similar-sentence retrieval, ...). Pooler weight name per
# checkpoint, for reference:
# roformer-sim-small-chinese => pooler.dense.weight
# roformer_chinese_sim_char_base => roformer.pooler.dense.weight
# simbert-base-chinese => bert.pooler.dense.weight
model_path = "simbert-base-chinese"

if __name__ == "__main__":
    # Guard the model download + inference so importing this module has no
    # side effects; behavior when run as a script is unchanged.
    bert_similarity = calculate_bert_similarity(model_path, text1, text2)
    print(bert_similarity)
