import jieba
from gensim.models import Word2Vec
from gensim.models.word2vec import PathLineSentences
import config


def text_to_words(input_file):
    """Read a UTF-8 text file and segment it into space-separated Chinese words using jieba."""
    with open(input_file, 'r', encoding='utf-8') as fh:
        raw_text = fh.read()
    # jieba.cut yields tokens lazily; join them into one whitespace-delimited string.
    return ' '.join(jieba.cut(raw_text))

def train_word2vec(sentences, vector_size=config.vec_dim, window=5, min_count=1, workers=4):
    """Train and return a gensim Word2Vec model on the given sentence iterable.

    Args:
        sentences: iterable of tokenized sentences (e.g. PathLineSentences).
        vector_size: embedding dimensionality (defaults to config.vec_dim).
        window: maximum distance between the current and predicted word.
        min_count: ignore words with total frequency below this value.
        workers: number of worker threads used during training.
    """
    return Word2Vec(
        sentences,
        vector_size=vector_size,
        window=window,
        min_count=min_count,
        workers=workers,
    )

def load_model(model_file):
    """Load and return a previously saved Word2Vec model from *model_file*."""
    return Word2Vec.load(model_file)


def run():
    """End-to-end pipeline: segment the corpus, train Word2Vec, and save the model."""
    # Segment the raw corpus with jieba.
    segmented_text = text_to_words('input/xyj.txt')

    # Persist the segmented text so Word2Vec can stream it from disk.
    segmented_file = config.seg_file
    with open(segmented_file, 'w', encoding='utf-8') as out:
        out.write(segmented_text)

    # Train on the segmented file; PathLineSentences streams it line by line.
    model = train_word2vec(PathLineSentences(segmented_file))

    # Save the trained model for later reuse via load_model().
    model.save(config.model_file)



if __name__ == '__main__':
    run()