import gensim
import pandas as pd
import os

def train_word2vec(train_path, test_path, save_path,
                   vector_size=300, window=5, sg=1, workers=8):
    """Train a word2vec model on the combined train/test ``text`` columns.

    Both input files are read as tab-separated tables; each row of the
    ``text`` column is treated as one pre-tokenized sentence whose tokens
    are separated by single spaces.

    Args:
        train_path: Path to the tab-separated training set (must contain a
            ``text`` column).
        test_path: Path to the tab-separated test set (must contain a
            ``text`` column).
        save_path: Destination file for the trained model; parent
            directories are created if missing.
        vector_size: Dimensionality of the word vectors.
        window: Maximum distance between the current and predicted word.
        sg: Training algorithm — 1 for skip-gram (default), 0 for CBOW.
        workers: Number of worker threads used during training.
    """
    # Make sure the directory that will hold the model exists.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    # Load both splits and stack their text columns into one series.
    train_df = pd.read_csv(train_path, sep='\t')
    test_df = pd.read_csv(test_path, sep='\t')
    total = pd.concat([train_df['text'], test_df['text']], axis=0)

    # Tokenize each document directly. This avoids materializing one giant
    # corpus string (the former join('\n') / split('\n') roundtrip) and is
    # robust to NaN or non-string entries in the text column, which would
    # otherwise make str.join raise a TypeError.
    sentences = [str(doc).split(' ') for doc in total.dropna()]

    # Train the model (skip-gram by default, matching the original setup).
    model = gensim.models.Word2Vec(
        sentences,
        vector_size=vector_size,
        window=window,
        sg=sg,
        workers=workers,
    )

    # Persist the full model (vectors + vocabulary + training state).
    model.save(save_path)

if __name__ == '__main__':
    # Script entry point: train a 300-dim skip-gram word2vec model on the
    # textCNN dataset and save it under tmp_data.
    train_word2vec(
        train_path='D:/Code/Python/NLP/textCNN/data/train_set.csv',
        test_path='D:/Code/Python/NLP/textCNN/data/test_a.csv',
        save_path='D:/Code/Python/NLP/textCNN/tmp_data/word2vec.d300.sg.w5.model',
        vector_size=300,
        window=5,
    )