import jieba
import re
import numpy as np
from sklearn.decomposition import PCA
import gensim
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
import matplotlib


# Load the corpus: read the novel line by line, segment each line into words
# with jieba, strip punctuation, and keep only non-empty lines of tokens.
# Fix: open the file with a context manager so the handle is always closed
# (the original `open` was never paired with a `close`).
# Hoisted: compile the punctuation regex once instead of on every token.
_punct_re = re.compile("[\s+\.\!\/_,$%^*()\"'“”，。!@<（）：‘’《》]+|[+你——~、；：]")

lines = []
with open("./MNIST/sanguo.txt", "r", encoding="utf-8") as f:
    for line in f:
        # Remove ASCII and full-width Chinese punctuation from each token,
        # then drop tokens that are empty after cleaning.
        cleaned = (_punct_re.sub(" ", tok).strip() for tok in jieba.lcut(line))
        words = [w for w in cleaned if w]
        if words:
            lines.append(words)

print(lines[:10])
# Train a skip-gram (sg=1) Word2Vec model with negative sampling on the
# segmented corpus; words seen fewer than 10 times are dropped from the vocab.
model = Word2Vec(
    lines,
    vector_size=50,
    window=5,
    min_count=10,
    epochs=50,
    sg=1,
    negative=5,
)

# Sanity-check the embeddings: print one word's vector and the ten
# nearest neighbours of another.
print(f"model.wv.get_vector('刘备')：{model.wv.get_vector('刘备')}")
res = model.wv.most_similar("吕布", topn=10)
print(f"res:{res}")

# Build the embedding matrix in vocabulary order, remembering each word's
# row index so we can look it up again after dimensionality reduction.
vocab = model.wv.index_to_key
word2ind = {w: i for i, w in enumerate(vocab)}
raw_wordVec = np.array([model.wv[w] for w in vocab])

# Project the 50-d embeddings down to 2-d with PCA for visualization.
X_reduced = PCA(n_components=2).fit_transform(raw_wordVec)
print(f"X_reduced.shape:{X_reduced.shape}")

# Draw every word as a faint black dot on a white canvas; the interesting
# names get highlighted on top of this background afterwards.
fig = plt.figure(figsize=(15, 10))
ax = fig.gca()
ax.set_facecolor("white")
ax.plot(X_reduced[:, 0], X_reduced[:, 1], ".", markersize=1, alpha=0.3, color="black")

# Highlight a handful of main characters as large red dots and label each
# in green, using a Chinese font file so the names render correctly.
words = ["孙权", "刘备", "曹操", "周瑜", "诸葛亮", "司马懿", "张飞", "汉献帝"]
font = matplotlib.font_manager.FontProperties(fname="./MNIST/华文仿宋.ttf", size=16)
for w in words:
    if w not in word2ind:
        # Word was pruned by min_count (or never segmented) — nothing to plot.
        continue
    x, y = X_reduced[word2ind[w]]
    plt.plot(x, y, ".", alpha=1, color="red", markersize=10)
    plt.text(x, y, w, alpha=1, fontproperties=font, color="green")
plt.show()