import pandas as pd
import jieba
from gensim.models import Word2Vec
from utils.config import Config

# Project root used to build the shared Config (stopwords path, etc.)
# NOTE(review): hardcoded absolute Windows path — presumably machine-specific;
# consider moving to an environment variable or CLI argument. TODO confirm.
root_path = "E:/Python+AI/group4_nlp_project"
conf = Config(root_path)


class Word2VecProcessor:
    """
    Wrapper around gensim's Word2Vec.

    Provides a unified interface for training, saving, loading, and
    vectorizing text, so callers never touch the raw ``Word2Vec`` object.
    ``load`` returns a ``Word2VecProcessor`` (not a bare ``Word2Vec``),
    preserving the custom helper methods, and syncs the instance
    hyperparameters from the loaded model.
    """

    def __init__(self, vector_size=100, window=5, min_count=3, epochs=10, stopwords_path=None):
        """
        Initialize the processor.

        :param vector_size: dimensionality of the word vectors
        :param window: context window size
        :param min_count: minimum word frequency threshold
        :param epochs: number of training epochs
        :param stopwords_path: path to a stopwords file; defaults to
            ``conf.stopwords_path`` from the module-level config
        """
        self.vector_size = vector_size
        self.window = window
        self.min_count = min_count
        self.epochs = epochs
        self.model = None  # set by train() or load()

        # Fall back to the project-wide stopwords file when none is given.
        if stopwords_path is None:
            stopwords_path = conf.stopwords_path
        self.stopwords = self._load_stopwords(stopwords_path)

    # =========================
    # Internal helpers
    # =========================
    def _load_stopwords(self, stopwords_path):
        """
        Load the stopwords file into a set.

        Returns an empty set on I/O or decoding failure (best-effort:
        missing stopwords degrade filtering, they should not abort setup).
        """
        try:
            with open(stopwords_path, "r", encoding="utf-8") as f:
                return {line.strip() for line in f if line.strip()}
        except (OSError, UnicodeDecodeError) as e:
            # Narrowed from bare Exception: only swallow errors that the
            # open/read itself can raise, not arbitrary programming errors.
            print(f"加载停用词失败: {e}")
            return set()

    def _tokenize(self, text: str):
        """
        Tokenize with jieba and filter stopwords.

        :param text: input text (None/NaN tolerated, yields [])
        :return: list of tokens
        """
        if not text or pd.isna(text):
            return []
        words = [w for w in jieba.cut(str(text).strip()) if w.strip()]
        if self.stopwords:
            words = [w for w in words if w not in self.stopwords]
        return words

    # =========================
    # Train / save / load
    # =========================
    def train(self, df, text_column="review"):
        """
        Train a Word2Vec model on one text column of a DataFrame.

        :param df: pandas DataFrame containing the text data
        :param text_column: name of the text column
        :return: self (enables chaining, e.g. ``p.train(df).save(path)``)
        """
        sentences = df[text_column].fillna("").astype(str).apply(self._tokenize).tolist()
        self.model = Word2Vec(
            sentences=sentences,
            vector_size=self.vector_size,
            window=self.window,
            min_count=self.min_count,
            workers=4,
            epochs=self.epochs
        )
        return self

    def save(self, model_path: str):
        """
        Save the trained Word2Vec model.

        :param model_path: destination path
        :raises ValueError: if no model has been trained/loaded yet
        """
        if self.model is None:
            raise ValueError("模型未训练，无法保存")
        self.model.save(model_path)

    @classmethod
    def load(cls, model_path: str, stopwords_path=None):
        """
        Load a Word2Vec model from disk (alternate constructor).

        :param model_path: path to the model file
        :param stopwords_path: optional stopwords path
        :return: Word2VecProcessor instance with the model attached
        """
        instance = cls(stopwords_path=stopwords_path)
        instance.model = Word2Vec.load(model_path)
        # Sync hyperparameters from the loaded model so the instance does
        # not misreport the constructor defaults (fixes stale attributes).
        instance.vector_size = instance.model.vector_size
        instance.window = instance.model.window
        instance.min_count = instance.model.min_count
        instance.epochs = instance.model.epochs
        return instance

    # =========================
    # Vector operations
    # =========================
    def get_word_vector(self, word):
        """
        Get the vector for a single word.

        :param word: input word
        :return: the word's vector, or None if out of vocabulary
        :raises ValueError: if no model has been trained/loaded yet
        """
        if self.model is None:
            raise ValueError("模型未加载")
        return self.model.wv[word] if word in self.model.wv else None

    def get_sentence_vector(self, text: str):
        """
        Convert text to a sentence vector (mean of in-vocabulary word vectors).

        :param text: input text
        :return: sentence vector (numpy array), or None if no known words
        :raises ValueError: if no model has been trained/loaded yet
        """
        if self.model is None:
            raise ValueError("模型未加载")
        tokens = self._tokenize(text)
        vectors = [self.model.wv[w] for w in tokens if w in self.model.wv]
        return sum(vectors) / len(vectors) if vectors else None

    def get_vocabulary(self):
        """
        Get the model's vocabulary.

        :return: list of vocabulary words
        :raises ValueError: if no model has been trained/loaded yet
        """
        if self.model is None:
            raise ValueError("模型未加载")
        return list(self.model.wv.index_to_key)
