import warnings
import gensim
import numpy as np

from utils.Segment import Seg

# Suppress the NotOpenSSLWarning (a UserWarning) emitted by urllib3 on some
# Python/OpenSSL combinations; it is noise for this script.
warnings.filterwarnings("ignore", category=UserWarning, module="urllib3")

# Shared segmenter instance used by all tokenization in this module.
seg = Seg()

def preprocess_text(text):
    """Tokenize *text* for search-style matching via the shared segmenter.

    Delegates to ``seg.cut_for_search``, which performs word segmentation
    and (per its own configuration) stop-word filtering.
    """
    return seg.cut_for_search(text)

def read_sentences_from_file(file_path):
    """Read a UTF-8 text file and return one stripped string per line.

    Blank lines are kept as empty strings, matching one-entry-per-line
    sentence files.
    """
    sentences = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            sentences.append(raw_line.strip())
    return sentences

def calculate_word2vec_similarity(text1, text2, model):
    """Score the similarity of two texts using averaged word2vec vectors.

    Each text is tokenized, the word vectors of in-vocabulary tokens are
    averaged, and the cosine similarity of the two mean vectors is computed.

    Args:
        text1: First text to compare.
        text2: Second text to compare.
        model: A trained ``gensim.models.Word2Vec`` model.

    Returns:
        Cosine similarity shifted by +1 (range [0, 2]), or 0 when either
        text has no usable tokens or a degenerate (zero) mean vector.
    """
    tokens1 = preprocess_text(text1)
    tokens2 = preprocess_text(text2)

    if not tokens1 or not tokens2:
        return 0

    vecs1 = [model.wv[token] for token in tokens1 if token in model.wv]
    vecs2 = [model.wv[token] for token in tokens2 if token in model.wv]
    # Bug fix: np.mean([]) emits a RuntimeWarning and returns NaN when no
    # token is in the vocabulary; treat fully out-of-vocabulary texts as 0.
    if not vecs1 or not vecs2:
        return 0

    vec1 = np.mean(vecs1, axis=0)
    vec2 = np.mean(vecs2, axis=0)

    # Cosine similarity; guard against zero-norm vectors to avoid
    # division by zero (which would previously produce NaN/inf).
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    if norm_vec1 == 0 or norm_vec2 == 0:
        return 0

    # NOTE(review): the +1 shifts cosine from [-1, 1] into [0, 2]; the
    # downstream 0.9 threshold depends on this shift — confirm it is
    # intentional before "fixing" it to plain cosine.
    similarity = np.dot(vec1, vec2) / (norm_vec1 * norm_vec2) + 1

    return similarity

def find_top_n_similar_sentences(target_sentence, sentences, model, topN):
    """Return the ``topN`` candidates most similar to ``target_sentence``.

    Args:
        target_sentence: The query sentence.
        sentences: Iterable of candidate sentences.
        model: Trained word2vec model passed through to the scorer.
        topN: Number of results to keep.

    Returns:
        A list of ``(sentence, score)`` tuples sorted by descending score.
    """
    scored = [
        (candidate, calculate_word2vec_similarity(target_sentence, candidate, model))
        for candidate in sentences
    ]
    # Stable in-place sort, highest score first, then truncate.
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:topN]


# Load the pretrained word2vec model from disk.
# NOTE(review): this runs at import time with hardcoded paths; importing this
# module as a library will trigger model loading and the search below.
model_path = "word2vec_model/my_model.model"
model = gensim.models.Word2Vec.load(model_path)

# Query sentence and candidate-sentence file (one sentence per line).
target_sentence = "我借呗不能用"
search_file_path = "data/test.txt"
topN=3

# Print the topN sentences from the input file most similar to the target,
# keeping only those whose (shifted) similarity exceeds 0.9.
sentences = read_sentences_from_file(search_file_path)
top_n_similar_sentences = find_top_n_similar_sentences(target_sentence, sentences, model, topN)
for sentence, similarity in top_n_similar_sentences:
    if similarity > 0.9:
        print(f"Sentence: {sentence}, Similarity: {similarity:.4f}")
