# -*- coding: utf-8 -*-
# @Author  : gaoyu
# @Time    : 2025/7/4 15:59
# @Function:
import os
import traceback
from typing import List, Dict, Any

import psycopg2
from loguru import logger

from config import DB_CONFIG

# Route HuggingFace downloads through the mirror; must be set before any
# HF library reads the environment. TODO confirm a downstream import needs it.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Try to import the real NLPMatcher; fall back to a degraded stub on failure.
try:
    from nlp.nlp_matcher import NLPMatcher
except ImportError as e:
    logger.error(f"导入 NLPMatcher 失败: {e}")


    # Minimal stand-in so the rest of the module still loads: whitespace
    # tokenization and uniform keyword weights only.
    class NLPMatcher:
        def __init__(self):
            pass

        def _tokenize(self, text):
            # NOTE(review): plain whitespace split — no Chinese word
            # segmentation, unlike what the real matcher presumably does.
            return text.split()

        def extract_keywords(self, text):
            # Every token becomes a (token, 1.0) pair to mimic the real
            # matcher's (keyword, score) output shape.
            return [(word, 1.0) for word in text.split()]


class QAQuery:
    """Query helper over the ``qa_knowledge`` table.

    Combines JSONB token-overlap matching with pg_trgm text similarity,
    optionally merged with results from the external NLP matcher.
    Requires the PostgreSQL ``pg_trgm`` extension for ``similarity()``.
    """

    def __init__(self, db_config: dict):
        """Initialize the QA query helper.

        Args:
            db_config (dict): psycopg2 connection keyword arguments.
        """
        self.db_config = db_config
        self.nlp_matcher = NLPMatcher()

    def _collect_tokens(self, question: str) -> List[str]:
        """Return the de-duplicated union of raw tokens and keyword tokens.

        Order is unspecified (set-based); the tokens are only used for
        membership matching in SQL, so order does not matter.
        """
        tokens = self.nlp_matcher._tokenize(question)
        keywords = self.nlp_matcher.extract_keywords(question)
        return list(set(tokens + [kw[0] for kw in keywords]))

    @staticmethod
    def _row_to_dict(row) -> Dict[str, Any]:
        """Map the first six columns of a result row onto a dict.

        ``match_count`` / ``text_similarity`` may come back NULL; coerce
        them to 0 / 0.0 so callers can score without None checks.
        """
        return {
            'question': row[0],
            'question_tokens': row[1],
            'answer': row[2],
            'category_id': row[3],
            'match_count': row[4] or 0,
            'text_similarity': row[5] or 0.0,
        }

    def search_qa(self, question: str, limit: int = 5) -> List[Dict[str, Any]]:
        """Search QA rows whose stored tokens overlap the question's tokens.

        Args:
            question (str): Natural-language question.
            limit (int): Maximum number of rows to return.

        Returns:
            List[Dict[str, Any]]: Rows ordered by token-overlap count, then
            trigram similarity; empty list on any failure.
        """
        try:
            all_tokens = self._collect_tokens(question)

            logger.info(f"查询问题: {question}")
            logger.info(f"分词结果: {all_tokens}")

            conn = psycopg2.connect(**self.db_config)
            try:
                with conn.cursor() as cursor:
                    # JSONB `?|` is true when ANY of the supplied text keys
                    # exists in the tokens array; psycopg2 adapts the Python
                    # list to a PostgreSQL array for both ANY() and `?|`.
                    query_sql = """
                    SELECT question, question_tokens, answer, category_id,
                           (
                               SELECT COUNT(*)
                               FROM jsonb_array_elements_text(question_tokens->'tokens') AS token
                               WHERE token = ANY(%s)
                           ) as match_count,
                           similarity(question, %s) as text_similarity
                    FROM qa_knowledge
                    WHERE question_tokens->'tokens' ?| %s
                    ORDER BY match_count DESC, text_similarity DESC
                    LIMIT %s
                    """

                    cursor.execute(query_sql, (all_tokens, question, all_tokens, limit))
                    return [self._row_to_dict(row) for row in cursor.fetchall()]

            finally:
                conn.close()

        except Exception as e:
            # logger.exception records the stack trace along with the message.
            logger.exception(f"搜索失败: {e}")
            return []

    def get_best_answer(self, question: str) -> Dict[str, Any]:
        """Fetch all related answers for ``question``.

        Args:
            question (str): Natural-language question.

        Returns:
            Dict[str, Any]: Success payload with ``question``,
            ``total_count`` and ``results`` when there are hits, otherwise
            a failure payload with a message.
        """
        results = self.search_qa(question, limit=10)  # limit is tunable

        if not results:
            return {
                'success': False,
                'question': question,
                'message': '未找到相关答案'
            }

        # `answer` is a JSONB document whose payload sits under its own
        # 'answer' key — a KeyError/TypeError here indicates bad row data.
        all_results = [{
            'question': result['question'],
            'answer': result['answer']['answer'],
            'confidence': result['match_count'],
            'similarity': result['text_similarity'],
            'category_id': result.get('category_id')
        } for result in results]

        return {
            'success': True,
            'question': question,
            'total_count': len(all_results),
            'results': all_results
        }

    def search_qa_hybrid(self, question: str, category_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Hybrid search: DB token matching merged with NLP-matcher results.

        Args:
            question (str): Natural-language question.
            category_id (str): Category to restrict the search to.
            limit (int): Maximum number of rows to return.

        Returns:
            List[Dict[str, Any]]: Rows carrying both DB scores and NLP
            scores; empty list on any failure.
        """
        try:
            # 1. Ask the NLP matcher for semantically similar questions.
            try:
                from nlp.nlp_matcher import exec_nlp_matcher
                nlp_result, nlp_df = exec_nlp_matcher(question)
            except ImportError as e:
                logger.error(f"导入 exec_nlp_matcher 失败: {e}")
                # Empty DataFrame routes us to the pure-DB query below.
                import pandas as pd
                nlp_result = None
                nlp_df = pd.DataFrame()

            # 2. Tokenize for the DB-side matching.
            all_tokens = self._collect_tokens(question)

            logger.info(f"查询问题: {question}")
            logger.info(f"分词结果: {all_tokens}")
            logger.info(f"NLP匹配器找到 {len(nlp_df)} 个结果")

            conn = psycopg2.connect(**self.db_config)

            try:
                with conn.cursor() as cursor:
                    # 3. Build the hybrid query.
                    if not nlp_df.empty:
                        nlp_questions = nlp_df['问题'].tolist()

                        # BUGFIX: the token / NLP alternatives must be
                        # parenthesized. The previous `A AND B OR C` form
                        # let NLP-matched rows from *other* categories
                        # bypass the category_id filter.
                        query_sql = """
                        SELECT DISTINCT question, question_tokens, answer, category_id,
                               (
                                   SELECT COUNT(*)
                                   FROM jsonb_array_elements_text(question_tokens->'tokens') AS token
                                   WHERE token = ANY(%s)
                               ) as match_count,
                               similarity(question, %s) as text_similarity,
                               CASE
                                   WHEN question = ANY(%s) THEN 1
                                   ELSE 0
                               END as nlp_matched
                        FROM qa_knowledge
                        WHERE category_id = %s
                          AND (question_tokens->'tokens' ?| %s OR question = ANY(%s))
                        ORDER BY nlp_matched DESC, match_count DESC, text_similarity DESC
                        LIMIT %s
                        """

                        cursor.execute(query_sql, (
                            all_tokens,     # token-overlap count
                            question,       # trigram similarity input
                            nlp_questions,  # marks NLP-matched rows
                            category_id,    # category filter
                            all_tokens,     # token match condition
                            nlp_questions,  # NLP match condition
                            limit
                        ))
                    else:
                        # No NLP hits: fall back to pure token matching.
                        # NOTE(review): this branch does not filter by
                        # category_id (same as the original behavior) —
                        # confirm whether that is intended.
                        query_sql = """
                        SELECT question, question_tokens, answer, category_id,
                               (
                                   SELECT COUNT(*)
                                   FROM jsonb_array_elements_text(question_tokens->'tokens') AS token
                                   WHERE token = ANY(%s)
                               ) as match_count,
                               similarity(question, %s) as text_similarity,
                               0 as nlp_matched
                        FROM qa_knowledge
                        WHERE question_tokens->'tokens' ?| %s
                        ORDER BY match_count DESC, text_similarity DESC
                        LIMIT %s
                        """

                        cursor.execute(query_sql, (all_tokens, question, all_tokens, limit))

                    rows = cursor.fetchall()

                    # 4. Build a question -> NLP-score lookup.
                    nlp_similarity_map = {}
                    if not nlp_df.empty:
                        for _, df_row in nlp_df.iterrows():
                            nlp_similarity_map[df_row['问题']] = {
                                'nlp_similarity': df_row['相似度'],
                                'matched_keywords': df_row['匹配关键词']
                            }

                    # 5. Merge the DB rows with the NLP info.
                    formatted_results = []
                    for row in rows:
                        item = self._row_to_dict(row)
                        item['nlp_matched'] = bool(row[6])
                        item['nlp_similarity'] = 0.0
                        item['matched_keywords'] = ''

                        nlp_info = nlp_similarity_map.get(item['question'])
                        if nlp_info is not None:
                            item['nlp_similarity'] = nlp_info['nlp_similarity']
                            item['matched_keywords'] = nlp_info['matched_keywords']

                        formatted_results.append(item)

                    logger.info(f"混合查询返回 {len(formatted_results)} 个结果")
                    return formatted_results

            finally:
                conn.close()

        except Exception as e:
            # logger.exception records the stack trace (replaces the
            # previous redundant in-function `import traceback`).
            logger.exception(f"混合搜索失败: {e}")
            return []

    def get_best_answer_hybrid(self, question: str, category_id: str) -> Dict[str, Any]:
        """Re-rank hybrid-search results by a combined score.

        Args:
            question (str): Natural-language question.
            category_id (str): Category to restrict the search to.

        Returns:
            Dict[str, Any]: Success payload with all re-ranked results, or
            a failure payload with a message when nothing matched.
        """
        results = self.search_qa_hybrid(question, category_id, limit=15)

        if not results:
            return {
                'success': False,
                'question': question,
                'message': '未找到相关答案'
            }

        all_results = []
        for result in results:
            all_results.append({
                'question': result['question'],
                'answer': result['answer']['answer'],
                'match_count': result['match_count'],
                'text_similarity': result['text_similarity'],
                'nlp_matched': result['nlp_matched'],
                'nlp_similarity': result['nlp_similarity'],
                'matched_keywords': result['matched_keywords'],
                'category_id': result.get('category_id'),
                # Weighted blend of the individual signals.
                # NOTE(review): weights sum to 1.2, so this is a ranking
                # score rather than a normalized value — confirm intent.
                'combined_score': (
                        result['match_count'] * 0.3 +
                        result['text_similarity'] * 0.3 +
                        result['nlp_similarity'] * 0.4 +
                        (1.0 if result['nlp_matched'] else 0.0) * 0.2
                )
            })

        # Highest combined score first.
        all_results.sort(key=lambda x: x['combined_score'], reverse=True)

        return {
            'success': True,
            'question': question,
            'total_count': len(all_results),
            'results': all_results
        }


def exec_nlp_query(question: str, category_id: str):
    """Run a hybrid query for one question and build a markdown summary.

    Combines database token matching with NLP semantic matching, logs the
    intermediate results, and renders the best matches into a markdown
    prompt (capped at roughly 2500 characters) that asks a downstream
    model to synthesize a final answer.

    Args:
        question (str): The question text to look up.
        category_id (str): Category to restrict the search to.

    Returns:
        str: Markdown-formatted prompt containing the query and the top
        matching Q/A pairs, with the 4-space template indentation stripped.

    Example:
        exec_nlp_query("大伙房水库的总库容是多少？", "category-001")
    """
    # Database connection settings come from the project config.
    db_config = DB_CONFIG

    # Build the query tool.
    query_tool = QAQuery(db_config)

    logger.debug(f"\n查询: {question}")
    logger.debug("-" * 50)

    # Run the hybrid query.
    result = query_tool.get_best_answer_hybrid(question, category_id)

    # BUGFIX: on no-match the result is a failure payload without
    # 'total_count'/'results'; use defaults so this renders an empty
    # prompt instead of raising KeyError.
    total_count = result.get('total_count', 0)
    logger.debug(f"找到 {total_count} 个结果")

    # Simplified structured view of the results for logging/rendering.
    simplified_output = {
        "query": question,
        "total_matches": total_count,
        "top_results": [{
            "question": item['question'],
            "answer": item['answer'],
            "category_id": item['category_id']
        } for item in result.get('results', [])]
    }
    print(simplified_output)

    logger.info("仅问题推荐结果:")
    logger.info(result)

    # Markdown preamble asking the downstream model to re-summarize.
    markdown_output = f"""
        以下是个关于水利相关的问答结果。请根据我的问题，在回答中重新总结最佳答案。
        请注意，找出来的答案可能不是最佳的答案，甚至部分回答可能跟当前问题无关，但是可以帮助你了解问题的情况。

        # 查询结果

        **查询问题:** {simplified_output['query']}

        """

    # Character budget: keep the whole prompt under 2500 characters.
    max_total_chars = 2500
    current_chars = len(markdown_output)

    for index, item in enumerate(simplified_output['top_results'], 1):
        entry = f"""---
    **问题:** {item['question']}
    
    **回答:** {item['answer']}
    ---

    """

        # Stop before the entry that would exceed the budget.
        if current_chars + len(entry) > max_total_chars:
            break

        markdown_output += entry
        current_chars += len(entry)

        # Hard cap on the number of included entries.
        if index >= 20:
            break

    # The template is indented for source readability; strip the
    # 4-space indent runs before returning.
    return markdown_output.replace('    ', '')
