from gensim.models import Word2Vec
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np

# Toy corpus containing the polysemous word "bank" in two different senses.
sentences = [
    ["I", "went", "to", "the", "bank", "to", "deposit", "money"],
    ["The", "river", "bank", "was", "beautiful", "at", "sunset"],
]

# Train a Word2Vec model on the tiny corpus (min_count=1 keeps every token).
word2vec_model = Word2Vec(
    sentences,
    vector_size=50,
    window=3,
    min_count=1,
    workers=4,
)

# A static embedding: "bank" gets exactly one vector regardless of context.
bank_vector = word2vec_model.wv["bank"]
print("Word2Vec 中 'bank' 的嵌入向量:")
print(bank_vector[:5], "...")    # show only the first 5 dimensions to keep output short

# Two contexts in which "bank" is used with different meanings.
context1 = ["deposit", "money"]    # financial sense
context2 = ["river", "beautiful"]    # geographic sense


def _vectors_for(words):
    """Return embeddings for the words present in the model vocabulary."""
    # Filtering on membership keeps the lookup from raising KeyError on OOV words.
    return [word2vec_model.wv[w] for w in words if w in word2vec_model.wv]


context1_vectors = _vectors_for(context1)
context2_vectors = _vectors_for(context2)

# 计算与上下文的平均相似度
def calculate_average_similarity(target_vector, context_vectors):
    """Return the mean cosine similarity between a target vector and context vectors.

    Args:
        target_vector: 1-D array-like embedding of the target word.
        context_vectors: iterable of 1-D array-like context-word embeddings,
            all of the same dimensionality as ``target_vector``.

    Returns:
        float: mean cosine similarity over the context vectors, or ``0.0``
        when ``context_vectors`` is empty (avoids the ``nan`` +
        RuntimeWarning that ``np.mean([])`` would produce).
    """
    context_vectors = list(context_vectors)
    if not context_vectors:
        # Empty context: define the similarity as 0 rather than nan.
        return 0.0
    target = np.asarray(target_vector, dtype=float)
    contexts = np.asarray(context_vectors, dtype=float)
    # Vectorized cosine similarity: (C @ t) / (||C_i|| * ||t||) — one numpy
    # pass instead of one pairwise cosine_similarity call per context vector.
    dots = contexts @ target
    norms = np.linalg.norm(contexts, axis=1) * np.linalg.norm(target)
    # Zero-norm vectors would divide by zero; their dot product is already 0,
    # so substituting 1 in the denominator yields similarity 0 (matching
    # sklearn's convention for all-zero vectors).
    norms = np.where(norms == 0.0, 1.0, norms)
    return float(np.mean(dots / norms))

# Score "bank" against each context with the shared helper.
similarity_context1, similarity_context2 = (
    calculate_average_similarity(bank_vector, vecs)
    for vecs in (context1_vectors, context2_vectors)
)

print(f"\n'bank' 与上下文 ['deposit', 'money'] 的相似度: {similarity_context1:.4f}")
print(f"'bank' 与上下文 ['river', 'beautiful'] 的相似度: {similarity_context2:.4f}")

# A static embedding gives "bank" a single vector, so the higher score merely
# reflects which sense dominated the training corpus.
verdict = (
    "\nWord2Vec 模型认为 'bank' 更接近 'deposit, money'"
    if similarity_context1 > similarity_context2
    else "\nWord2Vec 模型认为 'bank' 更接近 'river, beautiful'"
)
print(verdict)