# Visualize the relationships between words in the dataset (2-D scatter of word vectors)

import word2vec_CBOW as cw
import torch
import torch.nn as nn
import re
from matplotlib import pyplot as plt
def read_embedding_file(file_path):
    """Parse a word-embedding text file into a ``{word: [float, ...]}`` dict.

    Expected record layout (one record spans several lines):

        <word>:...      first non-blank line of a record; text before ':' is the word
        ":[             optional separator line, skipped verbatim
        0.1 0.2 ...     whitespace-separated components, possibly split
        ... 0.9]        across lines; a trailing ']' terminates the vector

    Args:
        file_path: path to the UTF-8 embedding text file.

    Returns:
        dict mapping each word to its vector as a list of floats.
    """
    embedding_dict = {}
    with open(file_path, 'r', encoding='utf-8') as f:
        word = None
        vector_parts = []
        for line in f:
            line = line.strip()
            if not line:
                continue  # skip blank lines
            if word is None:
                # First line of a record: the word precedes the first ':'
                word = line.split(':', 1)[0]
            else:
                if line == '":[':
                    continue  # separator line between word and vector
                vector_parts.extend(line.split())
                if line.endswith(']'):  # trailing ']' marks the end of the vector
                    vector_str = ' '.join(vector_parts)
                    # Strip the closing ']' and any opening '[' some writers emit
                    # (the original only stripped ']', which made float() fail
                    # on a leading '[').
                    vector_str = vector_str.rstrip(']').lstrip('[')
                    embedding_dict[word] = [float(tok) for tok in vector_str.split()]
                    # Reset state for the next word/vector record
                    word = None
                    vector_parts = []
    return embedding_dict

# Visualize the word vectors: scatter the first two components of each vector
plt.figure(figsize=(20, 20))

# Font configuration is loop-invariant — set it once, not on every iteration.
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in labels
plt.rcParams['axes.unicode_minus'] = False  # render '-' properly instead of a box

file_path = 'CBOW_ZH_wordvec.txt'
word2ReduceDimensionVec = read_embedding_file(file_path)

# Plot at most 1000 words — more makes the figure unreadable.
count = 0
for word, wordvec in word2ReduceDimensionVec.items():
    if count >= 1000:
        break  # stop iterating instead of scanning the remaining entries idly
    plt.scatter(wordvec[0], wordvec[1])
    plt.annotate(word, (wordvec[0], wordvec[1]))
    count += 1
plt.show()