# Chinese word-segmentation tool: splits a sentence into word tokens
import jieba
# NLP library providing Word2Vec and other word-embedding models
from gensim.models import Word2Vec
# Numerical computing library, used here for vector operations
import numpy as np


def tokenize_sentence(sentence: str):
    """Segment *sentence* into a list of word tokens via jieba."""
    # jieba.lcut(s) is exactly list(jieba.cut(s)); same tokens, same order.
    return list(jieba.cut(sentence))


def train_word2vec_model(sentences: list, vector_size=100, min_count=2):
    """Train and return a Word2Vec model on pre-tokenized sentences.

    Args:
        sentences: iterable of token lists, one list per sentence.
        vector_size: dimensionality of the learned word vectors.
        min_count: words with a total frequency below this are ignored.
    """
    model = Word2Vec(
        sentences,
        vector_size=vector_size,
        min_count=min_count,
    )
    return model


def sentence_to_vector(sentence:  str, model: Word2Vec, vector_size:  int):
    """Embed a sentence as the mean of its in-vocabulary word vectors.

    Tokens missing from the model's vocabulary are skipped; if no token
    is known to the model, the all-zeros vector of length *vector_size*
    is returned.
    """
    # Gather the vector of every token the model actually knows about.
    known_vectors = [
        model.wv[token]
        for token in tokenize_sentence(sentence)
        if token in model.wv
    ]

    # No recognizable word: fall back to the zero vector.
    if not known_vectors:
        return np.zeros(vector_size)

    # Accumulate into a float64 buffer (same dtype behavior as a running
    # sum started from np.zeros), then divide once by the word count.
    total = np.zeros(vector_size)
    for vec in known_vectors:
        total += vec
    return total / len(known_vectors)

# s = "自然语言处理很有趣"
# print(train_word2vec_model(tokenize_sentence(s)))
