import jieba
import fasttext
import fasttext.util
import pickle as pkl

# with open('data/中国寓言故事.txt', 'r', encoding='utf-8') as f:
#     texts = f.readlines()

# 示例文本
# texts = ["这是一个句子。", "这是另一个句子。", "中文分词很重要。"]

sourcefile = "../data/test_result.txt"

# Tokenization step (already performed; kept for reference):
# with open(sourcefile, 'w', encoding='utf-8') as f:
#     for text in texts:
#         tokens = jieba.lcut(text)  # word segmentation
#         f.write(' '.join(tokens) + '\n')  # one sentence per line



# Train word embeddings on the pre-tokenized corpus.
# NOTE(review): lr=0.001 is far below fastText's default of 0.05 — confirm this
# is intentional; otherwise the embeddings may be under-trained.
# model = fasttext.train_unsupervised(sourcefile, model='cbow', dim=8, epoch=30, minCount=1)
model = fasttext.train_unsupervised(sourcefile, model='skipgram', dim=128, epoch=30, lr=0.001, minCount=1)

# Shrink the 128-dim vectors down to 16 dimensions in place.
fasttext.util.reduce_model(model, 16)

# Dump the (reduced) vectors as plain text, one "<word> <v0> <v1> ..." per line.
with open('../data/word_vectors.txtmp', 'w', encoding='utf-8') as f:
    for token in model.words:
        components = model.get_word_vector(token)
        joined = ' '.join(str(c) for c in components)
        f.write(f"{token} {joined}\n")

# model.quantize(input=sourcefile,retrain=True)

modelfile = '../data/word_vectors.bin'
# Persist the trained model in fastText's binary format.
model.save_model(modelfile)

# wmodel = fasttext.load_model(modelfile)
# # 获取某个词的词向量
# word = "王国"
# if word in wmodel.words:
#     word_vector = wmodel.get_word_vector(word)
#     print(f"词向量: {word_vector}")
# else:
#     print("词汇表中不存在该词")

# 获取未登录词的词向量
# oov_word = "未登录词"
# oov_vector = wmodel.get_word_vector(oov_word)
# print(f"未登录词 '{oov_word}' 的词向量: {oov_vector}")

# Synonyms: build a lookup mapping each synonym to its canonical head word.
# Each input line is expected to look like "<head>/<tag> <syn1> <syn2> ..."
# (presumably the '/' separates the word from a part-of-speech/group tag —
# TODO confirm against the file format).
# similar_words = wmodel.get_nearest_neighbors("创建")
similar_map = {}
with open('../data/同义词.txt', 'r', encoding='utf-8') as f:
    swords = f.readlines()

for spairs in swords:
    ss = spairs.split()
    # Skip blank/whitespace-only lines (including a trailing newline-only
    # last line from readlines()); ss[0] would raise IndexError otherwise.
    if not ss:
        continue
    # Canonical head word: the token before the first '/'.
    w = ss[0].split("/")[0]
    # Map every remaining token on the line to the head word.
    for si in ss[1:]:
        similar_map[si] = w

with open('../data/generate_pkl/similars.pkl', 'wb') as f:  # serialized for use as training data
    pkl.dump(similar_map, f)  # write the map out in pickle format


# with open('../data/generate_pkl/poscab.pkl', 'wb') as f: #把这个处理后的文件当作训练数据
#     pkl.dump(similar_map, f)   #把文件写成pkl格式

# import numpy as np
# def cosine_similarity(vec1, vec2):
#     return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))

# word1 = "创建"
# word2 = "新建"

# vec1 = wmodel.get_word_vector(word1)
# vec2 = wmodel.get_word_vector(word2)
# print(vec1, vec2)
# similarity = cosine_similarity(vec1, vec2)
# print(f"'{word1}' 和 '{word2}' 的相似度: {similarity}")
