import numpy as np
from text2vec import SentenceModel
from sklearn.metrics.pairwise import cosine_similarity
import time
import os


def load_keywords(path):
    """Load keywords from a text file, one keyword per line.

    Args:
        path: Path to a UTF-8 encoded text file with one keyword per line.

    Returns:
        list[str]: Stripped, non-empty keywords in file order. Blank lines
        (including the empty entry a trailing newline would produce) are
        dropped so they never enter the embedding dictionary.
    """
    with open(path, 'r', encoding='utf-8') as file:
        # Iterate the file object directly — no need to materialize
        # every line up front with readlines().
        stripped = (line.strip() for line in file)
        return [keyword for keyword in stripped if keyword]


# Initialize the SentenceModel from a local model directory
# (shibing624/text2vec-base-chinese, a Chinese sentence-embedding model).
model_path = "model/shibing624-text2vec-base-chinese"
model = SentenceModel(model_path)

# Load the keyword dictionary from a local file (one keyword per line).
keywords_file_path = "dict/merged_dictionary.dat"
keywords_dict = load_keywords(keywords_file_path)

# Pre-compute embeddings for every dictionary keyword once at import time,
# so each query only needs to encode the query string itself.
keyword_embeddings = model.encode(keywords_dict)


def find_similar_keywords(query, top_n=5):
    """Find the dictionary keywords semantically closest to *query*.

    Args:
        query: Query word or phrase to search for.
        top_n: Number of most-similar keywords to return (default 5).

    Returns:
        tuple: ``(similar_keywords, elapsed_time)`` where ``similar_keywords``
        is a list of ``(keyword, score)`` pairs sorted by descending cosine
        similarity, and ``elapsed_time`` is the wall-clock query duration in
        seconds.
    """
    # perf_counter is monotonic and higher-resolution than time.time(),
    # making it the correct clock for measuring elapsed durations.
    start_time = time.perf_counter()

    # model.encode expects a list of sentences, hence [query].
    query_embedding = model.encode([query])

    # Cosine similarity between the query and every dictionary keyword;
    # take row 0 since there is exactly one query.
    similarities = cosine_similarity(query_embedding, keyword_embeddings)[0]

    # Indices of the top_n highest scores, in descending order.
    top_indices = np.argsort(similarities)[::-1][:top_n]
    # Cast numpy scalars to plain Python floats so callers get clean,
    # serializable values.
    similar_keywords = [(keywords_dict[i], float(similarities[i])) for i in top_indices]

    elapsed_time = time.perf_counter() - start_time

    return similar_keywords, elapsed_time


# Interactive test loop: repeatedly prompt for queries until the user exits.
if __name__ == "__main__":
    while True:
        # Strip surrounding whitespace so stray spaces don't break the
        # exit check or get encoded as part of the query.
        query = input("请输入查询词 (输入'退出'结束查询): ").strip()
        if query.lower() == '退出':
            break
        if not query:
            # Skip empty input instead of querying the model with "".
            continue
        similar_keywords, elapsed_time = find_similar_keywords(query)
        print(f"与 '{query}' 语义上接近的关键词和相应的相似度分数为:")
        for keyword, score in similar_keywords:
            print(f"{keyword}: {score:.4f}")
        print(f"查询耗时: {elapsed_time:.4f} 秒\n")
