import warnings
from gensim.models import Word2Vec, TfidfModel
import smart_open

from utils.Segment import Seg

# Suppress the NotOpenSSLWarning (a UserWarning emitted by urllib3 on
# macOS builds of Python linked against LibreSSL).
warnings.filterwarnings("ignore", category=UserWarning, module="urllib3")

# Module-level segmenter instance, shared by all preprocessing calls.
seg = Seg()

def preprocess_text(text):
    """Segment *text* into search-granularity tokens, stop words removed.

    Delegates to the module-level ``Seg`` instance's ``cut_for_search``.
    """
    return seg.cut_for_search(text)

def train_word2vec_model(corpus_path, model_path, *,
                         vector_size=150, window=7, min_count=10, workers=4):
    """Train a Word2Vec model on a line-per-sentence corpus and save it.

    Each line of the corpus file is segmented with ``preprocess_text`` and
    used as one training sentence.

    Args:
        corpus_path: Path to a UTF-8 text file, one sentence per line.
        model_path: Destination path for the saved gensim model.
        vector_size: Embedding dimensionality (keyword-only; defaults keep
            the previous hard-coded behavior).
        window: Maximum distance between the current and predicted word.
        min_count: Ignore words whose total frequency is below this value.
        workers: Number of worker threads used during training.

    NOTE(review): earlier grid-search results recorded in this file —
    mean gap between predicted and actual similarity scores was ~0.5475
    with {'vector_size': 150, 'window': 7, 'min_count': 10, 'workers': 4},
    and ~0.4826 with {'vector_size': 50, 'window': 3, 'min_count': 1,
    'workers': 7, 'sg': 0, 'hs': 0, 'negative': 5, 'sample': 0.0001,
    'alpha': 0.05, 'iter': 5, 'seed': 100}. The current defaults match the
    first configuration; confirm which one is intended before tuning.
    """
    # Read and segment the corpus: one tokenized sentence per line.
    with smart_open.open(corpus_path, 'r', encoding='utf-8') as file:
        sentences = [preprocess_text(line) for line in file]

    # Train the Word2Vec model with the supplied hyperparameters.
    model = Word2Vec(sentences=sentences, vector_size=vector_size,
                     window=window, min_count=min_count, workers=workers)

    # Persist the trained model to disk.
    model.save(model_path)

# Corpus path and model save path (adjust for your environment).
corpus_path = '/Users/apple/PycharmProjects/sentence-similarity/data/train.txt'
model_path = '/Users/apple/PycharmProjects/sentence-similarity/word2vec_model/my_model.model'

# Train only when run as a script — importing this module should not
# trigger a (slow, path-dependent) training run as a side effect.
if __name__ == "__main__":
    train_word2vec_model(corpus_path, model_path)
