import warnings

import gensim
from gensim.models import Word2Vec
import smart_open
import itertools

from utils.Segment import Seg

# Silence urllib3 warning noise (e.g. NotOpenSSLWarning on macOS system Python).
# NOTE(review): this filters *UserWarning* from urllib3; NotOpenSSLWarning is a
# UserWarning subclass, so it is covered — but so is every other urllib3 UserWarning.
warnings.filterwarnings("ignore", category=UserWarning, module="urllib3")

# Shared segmenter instance used by all text preprocessing in this module.
seg = Seg()


def preprocess_text(text):
    """Preprocess *text*: segment it for search (stop-word removal is handled
    by the segmenter) and return the resulting token list."""
    return seg.cut_for_search(text)


def train_word2vec_model(corpus_path, model_path, vector_size=100, window=5, min_count=1, workers=4,
                         sg=0, hs=1, negative=5, sample=1e-3, alpha=0.01, iter=5, seed=1):
    """Train (or continue training) a Word2Vec model and save it.

    If ``model_path`` already contains a saved model it is loaded and training
    continues on the new corpus; otherwise a fresh model is created.

    Args:
        corpus_path: UTF-8 text file, one sentence per line.
        model_path: Path the model is loaded from / saved to.
        vector_size: Word-vector dimensionality. Only effective for a newly
            created model — an existing model's dimensionality cannot change.
        window: Context window size.
        min_count: Minimum word frequency to keep a word in the vocabulary.
        workers: Number of training threads.
        sg: Architecture; 0 = CBOW, 1 = Skip-gram.
        hs: 1 to use hierarchical softmax.
        negative: Negative-sampling size.
        sample: Subsampling threshold for frequent words.
        alpha: Initial learning rate.
        iter: Number of training epochs. (Name kept for call-site backward
            compatibility even though it shadows the builtin ``iter``.)
        seed: Random seed.

    Returns:
        The trained gensim ``Word2Vec`` model.
    """
    # Read and segment the corpus, one tokenized sentence per line.
    with smart_open.open(corpus_path, 'r', encoding='utf-8') as file:
        sentences = [preprocess_text(line) for line in file]

    # Load an existing model to continue training, or create a fresh one.
    try:
        model = Word2Vec.load(model_path)
        is_new_model = False
    except FileNotFoundError:
        model = Word2Vec(
            vector_size=vector_size,
            window=window,
            min_count=min_count,
            workers=workers,
            sg=sg,
            hs=hs,
            negative=negative,
            sample=sample,
            alpha=alpha,
            epochs=iter,
            seed=seed,
        )
        is_new_model = True

    # Apply the trainable hyperparameters (relevant when continuing training
    # on a loaded model). BUGFIX: ``vector_size`` is deliberately NOT
    # reassigned on a loaded model — the dimensionality of already-allocated
    # vectors cannot be changed after the fact, and overwriting the attribute
    # only corrupts the model's metadata.
    model.workers = workers
    model.window = window
    model.min_count = min_count
    model.sg = sg
    model.hs = hs
    model.negative = negative
    model.sample = sample
    model.alpha = alpha
    model.epochs = iter
    model.seed = seed

    # BUGFIX: the fresh-model path previously called ``build_vocab(sentences)``
    # and then ``build_vocab(sentences, update=True)``, scanning the corpus
    # twice and doubling every word count (skewing min_count filtering and
    # frequency-based subsampling). Build the vocabulary exactly once.
    if is_new_model:
        model.build_vocab(sentences)
    else:
        model.build_vocab(sentences, update=True)

    # Continue (or start) training.
    model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)

    # Persist the model so the next call can resume from it.
    model.save(model_path)

    # Return the model for evaluation.
    return model

def calculate_sentence_similarity(model, sentence1, sentence2):
    """Return the cosine similarity of two sentences' bag-of-words vectors.

    Each sentence vector is the element-wise sum of the Word2Vec vectors of
    its in-vocabulary tokens.

    Returns 0 when either sentence yields no usable (in-vocabulary) tokens.
    BUGFIX: the original only checked ``sentence2`` for emptiness — an empty
    ``sentence1``, or a sentence whose tokens are all out-of-vocabulary, made
    ``sum([])`` evaluate to the integer 0 and crashed cosine_similarity with a
    shape mismatch.
    """
    # Local import keeps the module importable without numpy at top level
    # (mirrors the original's local sklearn import).
    import numpy as np

    words1 = preprocess_text(sentence1)
    words2 = preprocess_text(sentence2)

    # Collect in-vocabulary word vectors; bail out if either side is empty.
    vectors1 = [model.wv[w] for w in words1 if w in model.wv]
    vectors2 = [model.wv[w] for w in words2 if w in model.wv]
    if not vectors1 or not vectors2:
        return 0

    v1 = np.sum(vectors1, axis=0)
    v2 = np.sum(vectors2, axis=0)

    # Cosine similarity — numerically equivalent to sklearn's
    # cosine_similarity on these 1-D inputs, without the extra dependency.
    # Guard against zero-norm vectors to avoid a division by zero.
    denom = float(np.linalg.norm(v1)) * float(np.linalg.norm(v2))
    if denom == 0.0:
        return 0
    return float(np.dot(v1, v2) / denom)

def evaluate_sentence_similarity(model, similarity_path):
    """Evaluate the model on a file of ``sentence1,sentence2,score`` lines.

    Prints and returns the mean absolute difference between the predicted and
    the annotated similarity (lower is better). Returns 0 when the file
    contains no parseable records.
    """
    print("句子相似度评估...")
    total_score = 0.0
    total_records = 0
    # smart_open handles both local paths and remote URIs transparently.
    with smart_open.open(similarity_path, 'r', encoding='utf-8') as file:
        for line in file:
            try:
                sentence1, sentence2, score = line.strip().split(',')
                predicted_similarity = calculate_sentence_similarity(model, sentence1, sentence2)
                actual_similarity = float(score)
            except ValueError as e:
                # BUGFIX: the old handler printed sentence1/sentence2/score,
                # which are unbound when split() itself fails — the first
                # malformed line raised UnboundLocalError (or echoed stale
                # values from the previous line). Print the raw line instead.
                print(f"Error processing line: {e}")
                print(f"Line: {line.strip()}")
                continue
            total_score += abs(predicted_similarity - actual_similarity)
            total_records += 1

    if total_records == 0:
        print("No valid records found.")
        # NOTE(review): 0 means "perfect" to callers that minimize this score;
        # an empty evaluation file would therefore win the grid search — verify
        # this is the intended sentinel.
        return 0

    # Mean absolute difference — smaller is better.
    average_difference = total_score / total_records
    print("预测相似度评分 与 实际相似度评分之间的平均差异:", average_difference)
    return average_difference

# Entry point: grid-search Word2Vec hyperparameters by repeatedly continuing
# training on the same saved model and scoring on a sentence-similarity set.
if __name__ == "__main__":
    # Corpus path and model save path.
    corpus_path = '/Users/apple/PycharmProjects/sentence-similarity/data/train.txt'
    model_path = '/Users/apple/PycharmProjects/sentence-similarity/word2vec_model/my_model.model'

    # Evaluation data set path.
    similarity_path = '/Users/apple/PycharmProjects/sentence-similarity/data/dev.txt'

    # Hyperparameter grid. vector_size, window, min_count, sg, hs, negative and
    # epochs have the largest impact on accuracy; sample/alpha/seed matter less.
    # Resource-only knobs (workers, batch_words, ...) are not searched.
    vector_sizes = [50, 100, 150]   # word-vector dimensionality
    windows = [3, 5, 7]             # context window size
    min_counts = [1, 5, 10]         # minimum word frequency
    sgs = [0, 1]                    # architecture: 0 = CBOW, 1 = Skip-gram
    hss = [0, 1]                    # hierarchical softmax on/off
    negatives = [5, 10, 20]         # negative-sampling size
    samples = [1e-3, 1e-4, 1e-5]    # subsampling threshold
    alphas = [0.01, 0.05]           # initial learning rate
    iters = [5, 10]                 # training epochs
    seeds = [1, 42, 100]            # random seeds

    workers = [7]                   # training threads (fixed; not part of the search)

    best_params = None
    best_score = float('inf')

    # Materialize every parameter combination up front.
    all_combinations = list(
        itertools.product(vector_sizes, windows, min_counts, workers, sgs, hss,
                          negatives, samples, alphas, iters, seeds))

    # NOTE(review): because train_word2vec_model saves and reloads the model at
    # model_path, each combination continues training the SAME model rather
    # than training from scratch — this appears intentional ("持续训练").
    for vector_size, window, min_count, worker, sg, hs, negative, sample, alpha, iter, seed in all_combinations:
        params = {'vector_size': vector_size, 'window': window, 'min_count': min_count,
                  'workers': worker, 'sg': sg, 'hs': hs, 'negative': negative,
                  'sample': sample, 'alpha': alpha, 'iter': iter, 'seed': seed}
        print(f"current parameters {params}")

        # Train (or continue training) with this combination.
        trained_model = train_word2vec_model(corpus_path, model_path,
                                             vector_size=vector_size,
                                             window=window,
                                             min_count=min_count,
                                             workers=worker,
                                             sg=sg,
                                             hs=hs,
                                             negative=negative,
                                             sample=sample,
                                             alpha=alpha,
                                             iter=iter,
                                             seed=seed)

        # Score the model: mean absolute difference, lower is better.
        score = evaluate_sentence_similarity(trained_model, similarity_path)

        # BUGFIX: remember the parameter set that produced the best score.
        # Previously only `params` (the loop variable) was available at the
        # end, so the final print always showed the LAST combination tried,
        # not the best one.
        if score < best_score:
            best_score = score
            best_params = params
            print(f"current best parameters {best_params}")

    print(f"Best parameters:{best_params}")
