# -*- coding: utf-8 -*-
# @Author  : gaoyu
# @Time    : 2025/1/27
# @Function: NLP分词和词向量匹配系统

import json
import os
from config.config import DB_CONFIG
import re
import psycopg2
from typing import List, Tuple, Dict
import pandas as pd
import jieba
import jieba.posseg as pseg
from loguru import logger
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


class NLPMatcher:
    """Chinese-text question matcher.

    Loads stop words, keyword weights and a reference-question library from
    PostgreSQL (table ``t_nlp_config``), then matches incoming queries against
    the reference questions via jieba tokenization + TF-IDF cosine similarity.
    Falls back to an empty in-code default configuration when the database is
    unavailable.
    """

    def __init__(self):
        """Initialize jieba, load configuration, and build the TF-IDF space."""
        # Load the custom jieba dictionary before any tokenization happens.
        from .init_jieba_dict import init_jieba_dict
        init_jieba_dict()
        logger.info("Jieba分词词典初始化完成")

        # Configuration loaded from the database (with default fallback).
        self.config = self._load_config_from_db()

        # Words dropped during tokenization and keyword extraction.
        self.stop_words = set(self.config.get('stop_words', []))

        # Mapping word -> multiplicative weight applied in extract_keywords.
        self.keyword_weights = self.config.get('keyword_weights', {})

        # Library of reference questions that queries are matched against.
        self.reference_questions = self.config.get('reference_questions', [])

        # TF-IDF vectorizer driven by the jieba-based tokenizer.
        # token_pattern=None suppresses sklearn's "tokenizer overrides
        # token_pattern" warning that fires when a custom tokenizer is given.
        self.vectorizer = TfidfVectorizer(
            tokenizer=self._tokenize,
            token_pattern=None,
            lowercase=False,
            max_features=1000,
            ngram_range=(1, 2)
        )

        # Build the reference-question vector space up front.
        self._preprocess_reference_questions()

        logger.info("NLP匹配器初始化完成")

    def _get_default_config(self) -> Dict:
        """Return the empty fallback configuration.

        This method was previously referenced by _load_config_from_db but
        never defined, so any database failure raised AttributeError instead
        of falling back gracefully.
        """
        return {
            'stop_words': [],
            'keyword_weights': {},
            'reference_questions': []
        }

    def _load_config_from_db(self) -> Dict:
        """Load stop words, keyword weights and reference questions from PostgreSQL.

        Returns:
            Dict with keys 'stop_words', 'keyword_weights',
            'reference_questions'. On any psycopg2 error the default
            configuration from _get_default_config() is returned instead.
        """
        config = {}
        conn = None

        try:
            conn = psycopg2.connect(**DB_CONFIG)
            logger.info("成功连接到数据库")

            with conn.cursor() as cursor:
                # Single configuration row, fixed primary key.
                cursor.execute(
                    "SELECT stop_words, keyword_weights, reference_questions "
                    "FROM t_nlp_config WHERE id = 1")
                row = cursor.fetchone()

                if row:
                    # NULL columns degrade to empty containers.
                    stop_words = row[0] or []
                    keyword_weights = row[1] or {}
                    reference_questions = row[2] or []

                    logger.info(
                        f"从数据库加载配置: {len(stop_words)}个停用词, {len(keyword_weights)}个关键词权重, {len(reference_questions)}个参考问题")

                    config = {
                        'stop_words': stop_words,
                        'keyword_weights': keyword_weights,
                        'reference_questions': reference_questions
                    }
                else:
                    logger.warning("数据库中未找到配置记录")

        except psycopg2.Error as e:
            logger.error(f"数据库连接失败: {e}")
            # Fall back to the in-code defaults so the matcher still starts.
            config = self._get_default_config()
            logger.info("使用默认配置作为回退")
        finally:
            # Close even when the query (not just the connect) raised;
            # previously a mid-query exception leaked the connection.
            if conn is not None:
                conn.close()
                logger.info("数据库连接已关闭")

        return config

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize *text* with jieba, dropping stop words and punctuation-only tokens."""
        words = jieba.lcut(text)

        filtered_words = []
        for word in words:
            # Keep tokens that are not stop words, not blank, and contain at
            # least one word character (the regex rejects pure whitespace /
            # punctuation runs).
            if (word not in self.stop_words and
                    len(word.strip()) > 0 and
                    not re.match(r'^[\s\W]+$', word)):
                filtered_words.append(word)

        return filtered_words

    def extract_keywords(self, text: str) -> List[Tuple[str, float]]:
        """Extract weighted keywords from *text*.

        Weights start at 1.0 and are boosted by part of speech (nouns >
        verbs > adjectives) and by the configured keyword_weights table.
        Single-character tokens are discarded.

        Returns:
            List of (word, weight) tuples sorted by weight, descending.
        """
        words_with_pos = pseg.cut(text)

        keywords = []
        for word, pos in words_with_pos:
            if word in self.stop_words:
                continue

            weight = 1.0

            # Part-of-speech boost: nouns carry the most signal for matching.
            if pos in ['n', 'nr', 'ns', 'nt', 'nz']:  # nouns
                weight *= 1.5
            elif pos in ['v', 'vd', 'vn']:  # verbs
                weight *= 1.2
            elif pos in ['a', 'ad', 'an']:  # adjectives
                weight *= 1.1

            # Configured keyword weight: exact match takes precedence,
            # otherwise the first substring match (either direction) applies.
            # Previously an exact match was multiplied twice — once for the
            # exact lookup and once again by the substring loop.
            if word in self.keyword_weights:
                weight *= self.keyword_weights[word]
            else:
                for key, key_weight in self.keyword_weights.items():
                    if key in word or word in key:
                        weight *= key_weight
                        break

            if len(word) > 1:  # drop single characters
                keywords.append((word, weight))

        keywords.sort(key=lambda x: x[1], reverse=True)
        return keywords

    def _preprocess_reference_questions(self):
        """Reduce each reference question to its main keywords and fit the TF-IDF space."""
        processed_questions = []
        for question in self.reference_questions:
            keywords = self.extract_keywords(question)
            # Keep only keywords whose weight survived at or above baseline.
            main_keywords = [kw[0] for kw in keywords if kw[1] >= 1.0]
            processed_questions.append(' '.join(main_keywords))

        self.processed_references = processed_questions

        # fit_transform raises ValueError on an empty / all-blank corpus —
        # exactly the state produced by the DB-failure fallback. Leave the
        # vectors unset so find_best_match can short-circuit instead of the
        # matcher crashing at construction time.
        if any(processed_questions):
            self.reference_vectors = self.vectorizer.fit_transform(processed_questions)
        else:
            self.reference_vectors = None
            logger.warning("参考问题库为空，跳过TF-IDF向量构建")

        logger.info(f"预处理了{len(self.reference_questions)}个参考问题")

    def find_best_match(self, query: str, top_k: int = 3) -> List[Tuple[str, float, List[str]]]:
        """Return the *top_k* reference questions most similar to *query*.

        Returns:
            List of (reference_question, similarity, query_keywords) tuples
            sorted by similarity descending; empty when no keywords could be
            extracted, the reference library is empty, or nothing matched.
        """
        # No reference vector space was built (empty library) — nothing to match.
        if self.reference_vectors is None:
            logger.warning("参考问题库为空，无法匹配")
            return []

        query_keywords = self.extract_keywords(query)
        main_query_keywords = [kw[0] for kw in query_keywords if kw[1] >= 1.0]

        if not main_query_keywords:
            logger.warning("未提取到有效关键词")
            return []

        # Project the query into the reference TF-IDF space.
        query_text = ' '.join(main_query_keywords)
        query_vector = self.vectorizer.transform([query_text])

        similarities = cosine_similarity(query_vector, self.reference_vectors)[0]

        # Keep only reference questions with non-zero similarity.
        results = []
        for i, similarity in enumerate(similarities):
            if similarity > 0:
                results.append((
                    self.reference_questions[i],
                    similarity,
                    main_query_keywords
                ))

        results.sort(key=lambda x: x[1], reverse=True)

        return results[:top_k]

    def analyze_query(self, query: str) -> Dict:
        """Analyze *query*: extract keywords and find the best reference matches.

        Returns:
            Dict with 'original_query', 'extracted_keywords' (top 10
            (word, weight) pairs), 'main_keywords' (weight >= 1.5),
            'best_matches' and 'match_count'.
        """
        logger.info(f"分析查询: {query}")

        keywords = self.extract_keywords(query)
        matches = self.find_best_match(query)

        result = {
            'original_query': query,
            'extracted_keywords': keywords[:10],  # top 10 keywords
            'main_keywords': [kw[0] for kw in keywords if kw[1] >= 1.5],
            'best_matches': matches,
            'match_count': len(matches)
        }

        return result


def exec_nlp_matcher(test_query):
    """Run the NLP matcher against *test_query* and log the analysis.

    Args:
        test_query: the query string to analyze.

    Returns:
        Tuple of (analysis result dict, pandas DataFrame of matches sorted by
        similarity descending; empty DataFrame when nothing matched).
    """
    matcher = NLPMatcher()

    logger.debug(f"测试查询: {test_query}")
    logger.debug("=" * 50)

    # Analyze the query (keywords + best matches).
    result = matcher.analyze_query(test_query)

    # Log extracted keywords with their weights.
    logger.debug("提取的关键词:")
    for keyword, weight in result['extracted_keywords']:
        logger.debug(f"  {keyword}: {weight:.2f}")

    logger.debug(f"主要关键词: {', '.join(result['main_keywords'])}")

    logger.debug("最佳匹配结果:")
    # Build a DataFrame ordered by similarity, descending.
    if result['best_matches']:
        df_data = []
        for question, similarity, query_keywords in result['best_matches']:
            df_data.append({
                '问题': question,
                '相似度': similarity,
                '匹配关键词': ', '.join(query_keywords)
            })

        df = pd.DataFrame(df_data)
        # Defensive re-sort: find_best_match already returns results in
        # descending-similarity order.
        df = df.sort_values('相似度', ascending=False).reset_index(drop=True)

        logger.debug(f"共找到 {len(df)} 个匹配结果")

        return result, df
    else:
        # Route through the logger like every other message in this module
        # (previously a bare print, which bypassed log capture).
        logger.debug("未找到匹配结果")
        return result, pd.DataFrame()