import json
import logging
import math
import os
import re
from collections import defaultdict
from typing import List, Set, Dict, Tuple, Optional

import jieba
import jieba.posseg as pseg
import psycopg2
from loguru import logger

from config.config import DB_CONFIG


class KeywordExtractor:
    """Compute per-word keyword weights from QA questions.

    Pipeline: load domain terms and stopwords, segment questions with a
    generated jieba user dictionary, score words by normalized TF-IDF, blend
    the score with a core/general/other term prior into a weight clamped to
    [1.0, 3.0], and persist the result as JSON into PostgreSQL
    (``t_nlp_config.keyword_weights``, row id = 1).
    """

    # Fallback stopwords, used when the DB holds no configuration row or the
    # DB is unreachable.
    DEFAULT_STOPWORDS = {
        "哪些", "的", "了", "在", "是", "我", "有", "和", "就", "不", "人", "都", "一", "一个", "上",
        "也", "很", "到", "说", "要", "去", "你", "会", "着", "没有", "看", "好", "自己", "这", "那",
        "什么", "多少", "怎么", "为什么", "哪里", "什么时候", "如何", "请", "给我", "提供", "告诉我",
        "我想知道", "能否", "可以", "吗", "呢", "啊", "呀", "吧", "么", "嘛", "及", "以上", "以下",
        "等", "包括", "主要", "基本", "相关"
    }

    def __init__(self, db_params: Dict[str, str], core_terms_path: str, general_terms_path: str,
                 qa_path: str, custom_dict_path: str):
        """Initialize the keyword extractor.

        Args:
            db_params: psycopg2 connection parameters.
            core_terms_path: path to the core-term file (one term per line).
            general_terms_path: path to the general-term file (one term per line).
            qa_path: path to the QA text file questions are extracted from.
            custom_dict_path: path where the generated jieba user dict is written.
        """
        # Quiet jieba's startup chatter. Use the stdlib logging constant
        # instead of reaching through jieba's internal `jieba.logging` attr.
        jieba.setLogLevel(logging.INFO)

        self.db_params = db_params
        self.core_terms_path = core_terms_path
        self.general_terms_path = general_terms_path
        self.qa_path = qa_path
        self.custom_dict_path = custom_dict_path

        # Mutable state populated by the load_* / calculate_* methods.
        self.core_terms: Set[str] = set()
        self.general_terms: Set[str] = set()
        # Copy the class-level default so later mutation can never alter it.
        self.stopwords: Set[str] = set(self.DEFAULT_STOPWORDS)
        self.hybrid_weights: Dict[str, float] = {}
        self.questions: List[str] = []

        logger.info("关键词提取器初始化完成")

    def load_terms(self) -> None:
        """Load core terms and general terms from their text files.

        Raises:
            FileNotFoundError: if either term file is missing.
        """
        logger.info("开始加载核心词和一般词")

        self.core_terms = self._load_text_file(self.core_terms_path)
        logger.info(f"加载了{len(self.core_terms)}个核心词")

        self.general_terms = self._load_text_file(self.general_terms_path)
        logger.info(f"加载了{len(self.general_terms)}个一般词")

    def load_stopwords(self) -> None:
        """Load stopwords from the database; keep the defaults on any failure.

        Fix vs. original: the connection is now closed in a ``finally`` block,
        so it is no longer leaked when the query (rather than the connect)
        raises.
        """
        logger.info("开始从数据库加载停用词")

        conn = None
        try:
            conn = psycopg2.connect(**self.db_params)
            logger.info("成功连接到数据库")

            with conn.cursor() as cursor:
                cursor.execute("SELECT stop_words FROM t_nlp_config WHERE id = 1")
                row = cursor.fetchone()

                if row and row[0]:
                    # NOTE(review): assumes stop_words is an array/JSON list of
                    # words; if the column were a plain string, set() would
                    # split it into single characters — confirm the schema.
                    self.stopwords = set(row[0])
                    logger.info(f"从数据库加载了{len(self.stopwords)}个停用词")
                else:
                    logger.warning("数据库中未找到停用词配置，将使用默认停用词")

        except psycopg2.Error as e:
            logger.error(f"数据库连接失败: {e}")
            logger.warning("数据库连接失败，将使用默认停用词")
        finally:
            if conn is not None and not conn.closed:
                conn.close()
                logger.info("数据库连接已关闭")

    def load_questions(self) -> None:
        """Extract questions from the QA file into ``self.questions``.

        Raises:
            ValueError: if no question could be extracted.
            FileNotFoundError: if the QA file is missing.
        """
        logger.info(f"开始从 {self.qa_path} 提取问题")
        self.questions = self._extract_questions_from_file(self.qa_path)

        if not self.questions:
            raise ValueError("未提取到有效问题")

        logger.info(f"成功提取{len(self.questions)}个问题")

    def _init_custom_dict(self) -> None:
        """Write core + general terms to a jieba user dict file and load it."""
        all_terms = self.core_terms.union(self.general_terms)

        # Sort for a deterministic dict file (set iteration order varies
        # between runs); jieba's behavior is unaffected by line order.
        with open(self.custom_dict_path, 'w', encoding='utf-8') as f:
            for term in sorted(all_terms):
                # Fixed frequency 5, POS tag "n" for every domain term.
                f.write(f"{term} 5 n\n")

        jieba.load_userdict(self.custom_dict_path)
        logger.info(f"自定义词典已生成并加载，包含{len(all_terms)}个词")

    def segment_questions(self) -> List[List[str]]:
        """Segment every loaded question and filter noise tokens.

        Returns:
            One token list per question, with whitespace, pure punctuation,
            stopwords and single non-digit characters removed.

        Raises:
            ValueError: if ``load_questions`` has not produced any questions.
        """
        logger.info("开始分词处理")

        if not self.questions:
            raise ValueError("未加载任何问题")

        # Make sure domain terms are known to the tokenizer before cutting.
        self._init_custom_dict()

        segmented = []
        for q in self.questions:
            words = jieba.lcut(q, cut_all=False)
            filtered = [
                w for w in words
                if w.strip()
                   and not re.fullmatch(r'[^\w\s]', w)  # drop pure punctuation
                   and w not in self.stopwords
                   and (len(w) > 1 or w.isdigit())  # keep digits, drop other single chars
            ]
            segmented.append(filtered)

        avg_words = round(sum(len(d) for d in segmented) / len(segmented), 1)
        logger.info(f"分词完成，平均每个问题含{avg_words}个有效词")
        return segmented

    def calculate_tfidf(self, segmented_questions: List[List[str]]) -> Dict[str, float]:
        """Compute max-normalized TF-IDF per word over the question corpus.

        TF is averaged over all documents (empty documents are skipped for TF
        but still count toward ``total_docs``); IDF is the plain
        ``log(N / df)``, so a word present in every document scores 0.

        Returns:
            word -> TF-IDF value scaled into [0, 1], rounded to 4 decimals.
        """
        logger.info("开始计算TF-IDF值")

        total_docs = len(segmented_questions)
        word_doc_count: Dict[str, int] = defaultdict(int)  # document frequency
        word_tf_sum: Dict[str, float] = defaultdict(float)  # sum of per-doc TF

        for doc in segmented_questions:
            doc_len = len(doc)
            if doc_len == 0:
                continue

            for word in set(doc):
                word_doc_count[word] += 1

            word_counts: Dict[str, int] = defaultdict(int)
            for word in doc:
                word_counts[word] += 1
            for word, count in word_counts.items():
                word_tf_sum[word] += count / doc_len

        tfidf = {}
        for word, doc_count in word_doc_count.items():
            avg_tf = word_tf_sum[word] / total_docs
            idf = math.log(total_docs / doc_count)
            tfidf[word] = avg_tf * idf

        # Normalize into [0, 1] by the maximum value.
        max_tfidf = max(tfidf.values()) if tfidf else 1.0
        return {word: round(value / max_tfidf, 4) for word, value in tfidf.items()}

    def calculate_hybrid_weight(self) -> Dict[str, float]:
        """Blend TF-IDF with the core/general/other prior into final weights.

        Weight bands: core [2.5, 3.0] (2.8 when TF-IDF is 0), general
        [1.5, 2.5], other [1.0, 1.5]; everything clamped to [1.0, 3.0] and
        sorted by descending weight.

        Raises:
            ValueError: if no questions are loaded.
        """
        logger.info("开始计算混合权重")

        if not self.questions:
            raise ValueError("未加载任何问题")

        segmented = self.segment_questions()
        tfidf_weights = self.calculate_tfidf(segmented)
        all_words = {word for doc in segmented for word in doc}

        hybrid_weights = {}
        for word in all_words:
            tfidf_w = tfidf_weights.get(word, 0.0)
            prof_type = 'core' if word in self.core_terms else 'general' if word in self.general_terms else 'other'

            if prof_type == 'core':
                if tfidf_w == 0:  # core word whose TF-IDF collapsed to 0
                    hybrid_weights[word] = 2.8
                else:
                    hybrid_weights[word] = 2.5 + (tfidf_w * 0.5)  # [2.5, 3.0]
            elif prof_type == 'general':
                hybrid_weights[word] = 1.5 + (tfidf_w * 1.0)  # [1.5, 2.5]
            else:
                hybrid_weights[word] = 1.0 + (tfidf_w * 0.5)  # [1.0, 1.5]

        # Clamp to [1.0, 3.0] and round.
        self.hybrid_weights = {word: round(max(1.0, min(3.0, weight)), 4) for word, weight in hybrid_weights.items()}

        # Sort descending by weight for readable output/storage.
        self.hybrid_weights = dict(sorted(self.hybrid_weights.items(), key=lambda x: x[1], reverse=True))
        logger.info(f"混合权重计算完成，共{len(self.hybrid_weights)}个词")
        return self.hybrid_weights

    def save_weights_to_db(self) -> bool:
        """Persist hybrid weights as JSON into ``t_nlp_config`` (id = 1).

        Performs a manual upsert (SELECT then UPDATE/INSERT) inside one
        transaction; rolls back on any psycopg2 error.

        Returns:
            True on success, False when weights are missing or the DB fails.
        """
        logger.info("开始保存权重到数据库")

        if not self.hybrid_weights:
            logger.error("权重未计算，无法保存")
            return False

        weights_json = json.dumps(self.hybrid_weights, ensure_ascii=False)

        conn = None
        try:
            conn = psycopg2.connect(**self.db_params)
            logger.info("成功连接到数据库")

            with conn.cursor() as cursor:
                # Manual upsert: check for the row, then UPDATE or INSERT.
                cursor.execute("SELECT id FROM t_nlp_config WHERE id = 1")
                record_exists = cursor.fetchone()

                if record_exists:
                    update_query = """
                    UPDATE t_nlp_config
                    SET keyword_weights = %s
                    WHERE id = 1
                    RETURNING *;
                    """
                    cursor.execute(update_query, (weights_json,))
                    logger.info("成功更新了ID为1记录的keyword_weights字段")
                else:
                    insert_query = """
                    INSERT INTO t_nlp_config
                    (id, keyword_weights)
                    VALUES (1, %s)
                    RETURNING *;
                    """
                    cursor.execute(insert_query, (weights_json,))
                    logger.info("成功创建了新记录(ID=1)，并添加了keyword_weights字段")

                # RETURNING * lets us log the affected row's id.
                result = cursor.fetchone()
                logger.info(f"更新后的配置记录ID: {result[0]}")

                cursor.execute("SELECT id, keyword_weights FROM t_nlp_config;")
                logger.info("表中的所有记录:")
                for row in cursor.fetchall():
                    weights_count = len(row[1]) if row[1] else 0
                    logger.info(f"ID {row[0]}: {weights_count}个关键词权重")

                conn.commit()
                logger.info("事务已提交")

            return True

        except psycopg2.Error as e:
            logger.error(f"数据库操作出错: {e}")
            if conn is not None and not conn.closed:
                conn.rollback()
                logger.info("事务已回滚")
            return False
        finally:
            if conn is not None and not conn.closed:
                conn.close()
                logger.info("数据库连接已关闭")

    def run_full_process(self) -> bool:
        """Run the full load → weight → persist pipeline.

        Returns:
            True when every step succeeded, False otherwise (errors are
            logged, never propagated).
        """
        logger.info("开始完整的关键词提取流程")

        try:
            self.load_terms()
            self.load_stopwords()
            self.load_questions()

            self.calculate_hybrid_weight()

            success = self.save_weights_to_db()

            if success:
                logger.info("关键词权重计算和保存完成!")
                return True
            else:
                logger.error("权重保存失败")
                return False

        except Exception as e:
            logger.error(f"流程执行失败: {str(e)}")
            return False

    @staticmethod
    def _load_text_file(file_path: str) -> Set[str]:
        """Read a UTF-8 text file into a set of non-empty stripped lines.

        Raises:
            FileNotFoundError: if the file does not exist (logged, re-raised).
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return {line.strip() for line in f if line.strip()}
        except FileNotFoundError:
            logger.error(f"文件未找到：{file_path}")
            raise

    @staticmethod
    def _extract_questions_from_file(qa_path: str) -> List[str]:
        """Extract question texts marked as 【问题N】... from the QA file.

        Each match captures up to the next newline; results are stripped and
        empty captures dropped.

        Raises:
            FileNotFoundError: if the QA file does not exist (logged, re-raised).
        """
        try:
            with open(qa_path, 'r', encoding='utf-8') as f:
                text = f.read()

            question_pattern = r"【问题\d+】(.+?)\n"
            questions = re.findall(question_pattern, text, re.DOTALL)
            return [q.strip() for q in questions if q.strip()]
        except FileNotFoundError:
            logger.error(f"QA文件未找到：{qa_path}")
            raise


if __name__ == "__main__":
    # Configure a rotating file sink for loguru.
    logger.add("keyword_extraction.log", rotation="500 MB", level="INFO")

    # Resolve all data files relative to this script's directory.
    current_path = os.path.dirname(os.path.abspath(__file__))

    core_terms_path = os.path.join(current_path, "core_terms.txt")
    general_terms_path = os.path.join(current_path, "general_terms.txt")
    qa_path = os.path.join(current_path, "qa_all.txt")
    custom_dict_path = os.path.join(current_path, "auto_custom_dict.txt")

    # Database connection parameters come from project config.
    db_params = DB_CONFIG

    extractor = KeywordExtractor(
        db_params=db_params,
        core_terms_path=core_terms_path,
        general_terms_path=general_terms_path,
        qa_path=qa_path,
        custom_dict_path=custom_dict_path
    )

    success = extractor.run_full_process()

    if not success:
        logger.error("关键词提取流程失败")
        # raise SystemExit instead of the site-module `exit()` helper, which
        # is not guaranteed to exist in every execution mode (e.g. -S).
        raise SystemExit(1)