from sklearn.decomposition import PCA
import numpy as np
from gensim.models import Word2Vec
import matplotlib.pyplot as plt

# Load the previously trained Word2Vec model (path relative to the
# working directory).
model = Word2Vec.load('word2vec.model')

# Hand-picked vocabulary to visualize. NOTE(fix): the original list
# contained '历史' twice, which plotted two identical overlapping points;
# the duplicate is removed here, order otherwise preserved.
words = ['和平', '事业', '历史', '领导', '北京', '新春', '发展', '掌声', '时代',
         '财富', '健康', '经济', '汽水', '作风', '安徽', '核心', '观念']

# KeyedVectors supports batch indexing: model.wv[words] returns a 2-D
# ndarray of shape (len(words), vector_size) in one call, so the manual
# append loop is unnecessary. Raises KeyError if a word is out of
# vocabulary (same behavior as the original per-word lookup).
words_vector_list = np.asarray(model.wv[words])

pca = PCA(n_components=2)  # project embeddings down to 2 components
results = pca.fit_transform(words_vector_list)  # (len(words), 2) coordinates

plt.rcParams['font.sans-serif'] = ['SimHei']  # enable CJK glyph rendering
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs displayable with SimHei

# `results` is an (n_words, 2) ndarray from PCA; slice the two component
# columns directly instead of appending element-by-element and converting
# back to arrays.
x = results[:, 0]
y = results[:, 1]

plt.scatter(x, y, color='green')  # scatter plot of the 2-D embeddings
# Annotate each point with its word.
for xi, yi, word in zip(x, y, words):
    plt.text(xi, yi, word, color='black')
plt.show()
