import jieba
import os
from gensim.models import word2vec
from gensim.test.utils import get_tmpfile

def load_stopwords(file_path):
    """Load a stopword list from *file_path* (one word per line, UTF-8).

    Blank lines are skipped so the empty string never ends up in the set
    (the original list-then-set version kept '' for every blank line).
    Undecodable bytes are ignored rather than raising.

    Args:
        file_path: path to the stopword file.

    Returns:
        set[str]: the stopwords.
    """
    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
        # Build the set directly; the truthiness filter drops blank lines.
        return {line.strip() for line in f if line.strip()}

def preprocess_text(text, stopwords):
    """Segment *text* with jieba and return the kept tokens joined by spaces.

    Args:
        text: raw text to segment.
        stopwords: collection of tokens to drop (membership-tested per token).

    Returns:
        str: space-joined segmentation with stopwords removed.
    """
    kept_tokens = (token for token in jieba.cut(text) if token not in stopwords)
    return " ".join(kept_tokens)

def read_and_preprocess_book(file_path, stopwords):
    """Read one GB18030-encoded book and return its segmented, stopword-free text.

    Args:
        file_path: path to the raw book text file (GB18030 encoding).
        stopwords: collection of tokens to remove.

    Returns:
        str: space-joined segmented text.
    """
    with open(file_path, 'r', encoding='gb18030') as book_file:
        raw_text = book_file.read()
    return preprocess_text(raw_text, stopwords)

def prepare_corpus(book_titles, base_dir, stopwords, output_dir):
    """Segment every book and write the results into *output_dir*.

    For each title, reads ``{base_dir}/{title}.txt`` (GB18030), removes
    stopwords, writes the segmented text to ``{output_dir}/{title}_segment.txt``
    (UTF-8), and collects it in the returned dict.

    Args:
        book_titles: iterable of book names (without the ``.txt`` suffix).
        base_dir: directory containing the raw ``.txt`` books.
        stopwords: collection of tokens to drop during segmentation.
        output_dir: directory for the ``*_segment.txt`` files; created if absent.

    Returns:
        dict: mapping of title -> preprocessed (space-joined) text.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(output_dir, exist_ok=True)
    corpus_dict = {}
    for title in book_titles:
        file_path = os.path.join(base_dir, f'{title}.txt')
        preprocessed_text = read_and_preprocess_book(file_path, stopwords)
        output_path = os.path.join(output_dir, f'{title}_segment.txt')
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(preprocessed_text)
        corpus_dict[title] = preprocessed_text
    return corpus_dict

def train_word2vec_model(input_dir, output_model_path):
    """Train a Word2Vec model over all line files under *input_dir* and save it.

    Args:
        input_dir: directory of whitespace-segmented text files
            (read lazily via PathLineSentences).
        output_model_path: where to save the trained model.
    """
    corpus = word2vec.PathLineSentences(input_dir)
    w2v = word2vec.Word2Vec(
        corpus,
        sg=1,             # skip-gram
        hs=1,             # hierarchical softmax
        vector_size=200,
        window=5,
        min_count=1,
        epochs=20,
    )
    w2v.save(output_model_path)

def main():
    """Pipeline entry point: load stopwords, segment the corpus, train Word2Vec."""
    stopwords = load_stopwords('cn_stopwords.txt')
    book_titles = [
        "白马啸西风", "碧血剑", "飞狐外传", "连城诀",
        "鹿鼎记", "三十三剑客图", "射雕英雄传", "神雕侠侣",
        "书剑恩仇录", "天龙八部", "侠客行", "笑傲江湖",
        "雪山飞狐", "倚天屠龙记", "鸳鸯刀", "越女剑",
    ]
    segment_dir = './processed_books'
    prepare_corpus(book_titles, './jyxstxtqj_downcc', stopwords, segment_dir)
    train_word2vec_model(segment_dir, "word2vec.model")
    print("finish training....")

if __name__ == '__main__':
    main()
