import torch
from transformers import AutoModel, AutoTokenizer, RobertaModel, RoFormerTokenizer
from sklearn.metrics.pairwise import cosine_similarity


def calculate_bert_similarity_topN(model_path, sentences, target_sentence, top_n=5):
    """Return the top_n candidate sentences most similar to target_sentence.

    Embeds the target and all candidates in one batch using the CLS-token
    output of the model, then ranks candidates by cosine similarity.

    Args:
        model_path: path or name of the pretrained model to load.
        sentences: list of candidate sentences.
        target_sentence: the sentence to compare candidates against.
        top_n: number of results to return (clamped to len(sentences)).

    Returns:
        List of (sentence, similarity) tuples, most similar first.
    """
    # Load tokenizer and model (RoFormer tokenizer paired with a RoBERTa
    # body as in the original — presumably for roformer_chinese_sim_char_base;
    # NOTE(review): confirm this pairing is intended for the model used).
    tokenizer = RoFormerTokenizer.from_pretrained(model_path)
    model = RobertaModel.from_pretrained(model_path)

    # Encode target (row 0) together with the candidates in a single batch.
    all_sentences = [target_sentence] + sentences
    inputs = tokenizer(all_sentences, padding=True, truncation=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
        # CLS-token embedding for every sentence. sklearn consumes numpy
        # directly, so no round-trip back through torch.tensor is needed.
        embeddings = outputs.last_hidden_state[:, 0, :].numpy()

    # Cosine similarity between the target (row 0) and each candidate.
    similarities = cosine_similarity(embeddings[0:1], embeddings[1:])

    # Clamp so an over-large request cannot wrap around via negative slicing.
    top_n = min(top_n, len(sentences))

    # Indices of the top_n most similar candidates, best first.
    top_indices = similarities.argsort()[0][-top_n:][::-1]

    # BUG FIX: similarities[0][i] corresponds to sentences[i]
    # (i.e. all_sentences[i + 1]); the original indexed all_sentences[i],
    # an off-by-one that could return the target sentence itself and could
    # never return the last candidate.
    top_sentences = [(sentences[i], similarities[0][i]) for i in top_indices]
    return top_sentences

def calculate_bert_similarity(model_path, text1, text2):
    """Return the cosine similarity between the CLS embeddings of two texts.

    Args:
        model_path: path or name of the pretrained model to load.
        text1: first text.
        text2: second text.

    Returns:
        Cosine similarity as a Python float.
    """
    # Load tokenizer and model.
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path)

    # Tokenize and encode both texts in one batch.
    inputs = tokenizer([text1, text2], padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
        # CLS-token embeddings for both texts.
        embeddings = outputs.last_hidden_state[:, 0, :].numpy()

    # sklearn's cosine_similarity operates on numpy arrays directly; the
    # original's numpy -> torch.tensor round-trip was a no-op removed here.
    similarity = cosine_similarity(embeddings[0:1], embeddings[1:2])
    return similarity.item()

def _read_sentences(file_path):
    """Read the file at file_path (UTF-8) and return one stripped line per sentence."""
    with open(file_path, 'r', encoding='utf-8') as file:
        return [line.strip() for line in file]


def main():
    """Interactive driver: prompt for inputs and print the top-N similar sentences."""
    # Example file path: /Users/apple/PycharmProjects/sentence-similarity/data/test.txt
    file_path = input("请输入文本文件路径: ")
    sentences = _read_sentences(file_path)

    # Target sentence to rank candidates against.
    target_sentence = input("请输入目标句子: ")

    # Number of related sentences to retrieve, e.g. 5.
    top_n = int(input("请输入查询相关句子数量: "))

    # Pretrained model path (can be fine-tuned for the task), e.g.
    # chinese_L-12_H-768_A-12 or roformer_chinese_sim_char_base.
    model_path = input("请输入Bert模型路径: ")

    # Compute and print the top-N sentences related to the target.
    top_sentences = calculate_bert_similarity_topN(model_path, sentences, target_sentence, top_n)
    for sentence, similarity in top_sentences:
        print(f"Sentence: {sentence} | Similarity: {similarity:.4f}")


# Guarded entry point so importing this module for its helper functions
# no longer triggers the interactive prompts as a side effect.
if __name__ == "__main__":
    main()
