import os
import re

import jieba
import jieba.posseg as pseg

# Word2Vec training hyper-parameters, read by generate_wordvec() below.
para: dict[str, int] = {
    "vector_size": 256,  # dimensionality of the learned word vectors
    "window": 5,         # context window size (words before/after the target)
    "min_count": 2,      # ignore tokens appearing fewer times than this
    "sg": 1,             # 1 = Skip-gram, 0 = CBOW
    "epochs": 5,         # training passes over the corpus
    # NOTE(review): key is a misspelling of "negative" (negative-sampling
    # count); kept as-is because it is looked up by this exact name when
    # the model is built.
    "negitive": 20,
}
# Current model version; add_version() bumps ./models/version.txt and
# stores the new number here (used to name the saved model file).
version: int = 0

def add_version():
    """Bump the persisted model version counter by one.

    Reads the current integer version from ./models/version.txt, writes
    the incremented value back, and stores it in the module-level
    ``version`` global (used to name the saved model file). On the very
    first run — when the counter file or the ./models directory does not
    exist yet — the counter starts from 0 instead of crashing.
    """
    global version
    version_path = "./models/version.txt"

    try:
        with open(version_path, "r", encoding="utf-8") as f:
            current = int(f.read().strip())
    except FileNotFoundError:
        # First run: no counter file yet — treat the stored version as 0.
        current = 0

    version = current + 1
    print(f'practise model v{version}')

    # Ensure the output directory exists before writing the new counter.
    os.makedirs(os.path.dirname(version_path) or ".", exist_ok=True)
    with open(version_path, 'w', encoding='utf-8') as f:
        print(version, file=f)

def generate_wordvec():
    """Tokenize the corpus and train a Word2Vec model on it.

    Reads 'to-split-data.csv' line by line, segments each line with
    jieba (after loading the custom TCM dictionaries), drops stopwords,
    punctuation-bearing tokens and single characters, dumps every
    surviving (token, line-number) pair to
    ./models/word-split-with-line.txt, then trains a gensim Word2Vec
    model with the hyper-parameters from ``para`` and saves it under a
    name derived from the current ``version``.

    Returns:
        The trained gensim Word2Vec model.
    """
    # Load the custom TCM dictionaries so domain terms are kept as
    # single tokens during segmentation.
    jieba.load_userdict('./dic/tcmdic.txt')
    jieba.load_userdict('./dic/bettertcm.txt')

    def load_stopwords(filepath):
        # One stopword per line; a set gives O(1) membership tests.
        with open(filepath, encoding='utf-8') as f:
            return set(line.strip() for line in f)

    stopwords = load_stopwords('./dic/stopwords.txt')
    # Hoisted out of the per-word loop: matches any punctuation/symbol.
    punct = re.compile(r'[^\w\s]')

    sentences = []       # one token list per corpus line (Word2Vec input)
    word_with_line = []  # (token, 1-based line number) pairs for the dump

    with open('to-split-data.csv', encoding='utf-8') as file:
        for line_num, line in enumerate(file, 1):  # line number as label
            tokens = jieba.lcut(line.strip())
            filtered = [
                w for w in tokens
                if not punct.search(w) and w not in stopwords and len(w) > 1
            ]
            word_with_line.extend((w, line_num) for w in filtered)
            sentences.append(filtered)

    # Dump tokens tagged with their source line number.
    print('写入分词结果带行号标签')
    with open('./models/word-split-with-line.txt', 'w', encoding='utf-8') as f:
        for word, num in word_with_line:
            print(f"{word}\t{num}", file=f)

    # Imported lazily: gensim is only needed when training is requested.
    from gensim.models import Word2Vec

    model = Word2Vec(
        sentences=sentences,
        vector_size=para['vector_size'],  # word-vector dimensionality
        window=para['window'],            # context window size
        # FIX: was hard-coded to 5, silently overriding para['min_count'].
        min_count=para['min_count'],
        sg=para['sg'],                    # 1 = Skip-gram, 0 = CBOW
        epochs=para['epochs'],
        # NOTE: 'negitive' is the (misspelled) key used in the para dict.
        negative=para['negitive'],
        workers=8,
        hs=0,                             # negative sampling, no hierarchical softmax
    )

    model.save(f'./models/word2vec-tinymodel-v{version}.model')
    print("Model saved.")
    return model

def show_visual_vec():
    """Render the 2D WebGL visualization for the current model version."""
    # Imported lazily so the visualization dependency is only loaded
    # when a plot is actually requested.
    from visualmodel2DwebGL import draw_word2vec_visualization
    draw_word2vec_visualization(version=version)

if __name__ == "__main__":
    # Full pipeline: bump the version counter, train the model for the
    # new version, then render its visualization. Guarded so importing
    # this module no longer kicks off training as a side effect.
    add_version()
    generate_wordvec()
    show_visual_vec()
