# -*- coding: utf-8 -*-
# @Author  : gaoyu
# @Time    : 2025/1/27
# @Function: QA数据分词后导入PostgreSQL数据库

import os
import re
import psycopg2
from psycopg2.extras import Json
from loguru import logger
from nlp.nlp_matcher import NLPMatcher
from typing import List, Tuple, Optional
from config import DB_CONFIG


class QAImporter:
    """Import tokenized QA pairs into a PostgreSQL knowledge-base table.

    Each question is tokenized with ``NLPMatcher`` and stored as JSONB so
    later lookups can match on individual tokens through GIN indexes.
    """

    def __init__(self, db_config: dict):
        """Initialize the QA importer.

        Args:
            db_config (dict): psycopg2 connection keyword arguments.
        """
        self.db_config = db_config
        self.nlp_matcher = NLPMatcher()
        self.conn = None  # lazily opened by connect_db()

    def connect_db(self):
        """Open a database connection if none exists or the old one is closed."""
        try:
            # psycopg2 sets conn.closed to a non-zero value once closed,
            # so this also transparently reconnects after a close().
            if self.conn is None or self.conn.closed != 0:
                self.conn = psycopg2.connect(**self.db_config)
                logger.info("数据库连接成功")
        except Exception as e:
            logger.error(f"数据库连接失败: {e}")
            raise

    def create_table(self):
        """Create the qa_knowledge table and its GIN indexes if missing."""
        create_table_sql = """
        CREATE TABLE IF NOT EXISTS qa_knowledge (
            question TEXT PRIMARY KEY,
            question_tokens JSONB NOT NULL,
            answer JSONB NOT NULL,
            category_id VARCHAR(20) NOT NULL,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        );
        """

        # GIN indexes speed up JSONB containment / existence queries
        # (the second one targets the 'tokens' array specifically).
        create_index_sql = """
        CREATE INDEX IF NOT EXISTS idx_qa_question_tokens_gin 
        ON qa_knowledge USING GIN (question_tokens);

        CREATE INDEX IF NOT EXISTS idx_qa_tokens_array 
        ON qa_knowledge USING GIN ((question_tokens->'tokens'));
        """

        try:
            with self.conn.cursor() as cursor:
                cursor.execute(create_table_sql)
                cursor.execute(create_index_sql)
                self.conn.commit()
                logger.info("QA知识库表检查/创建成功")
        except Exception as e:
            logger.error(f"创建表失败: {e}")
            raise

    def parse_filename(self, filename: str) -> Tuple[Optional[str], Optional[str]]:
        """Parse the reservoir name and ID from a file name.

        Expected format: ``<reservoir name>_<numeric id>.txt``.

        Args:
            filename (str): Bare file name (not a path).

        Returns:
            Tuple[Optional[str], Optional[str]]: (reservoir name, category id),
            or (None, None) if the name does not match the expected pattern.
        """
        pattern = r"^(.+?)_(\d+)\.txt$"
        match = re.match(pattern, filename)

        if match:
            reservoir_name = match.group(1).strip()
            category_id = match.group(2).strip()
            return reservoir_name, category_id

        # BUGFIX: the message previously printed a literal placeholder
        # instead of the offending file name.
        logger.warning(f"无法从文件名解析水库信息: {filename}")
        return None, None

    def parse_qa_file(self, file_path: str) -> List[Tuple[str, str]]:
        """Parse one QA text file into (question, answer) pairs.

        The file is expected to contain blocks shaped like
        ``【问题N】...\\n【回答】...`` separated by blank lines.

        Args:
            file_path (str): Path of the QA file.

        Returns:
            List[Tuple[str, str]]: Non-empty question/answer pairs.

        Raises:
            Exception: Propagates I/O or decoding errors after logging.
        """
        qa_pairs = []

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Lazily match up to the next question header (or end of file).
            pattern = r'【问题\d+】(.*?)\n【回答】(.*?)(?=\n\n【问题|$)'
            matches = re.findall(pattern, content, re.DOTALL)

            for question, answer in matches:
                question = question.strip()
                answer = answer.strip()
                if question and answer:
                    qa_pairs.append((question, answer))

            logger.info(f"解析到 {len(qa_pairs)} 个问答对")
            return qa_pairs

        except Exception as e:
            logger.error(f"解析QA文件失败: {e}")
            raise

    def tokenize_question(self, question: str) -> dict:
        """Tokenize a question and extract weighted keywords.

        Args:
            question (str): Question text.

        Returns:
            dict: ``{"tokens": [...], "keywords": [{"word", "weight"}, ...],
            "token_count": int}``. On failure, all fields are empty/zero.
        """
        try:
            # NOTE(review): relies on the matcher's private _tokenize();
            # consider exposing a public tokenize API on NLPMatcher.
            tokens = self.nlp_matcher._tokenize(question)

            keywords = self.nlp_matcher.extract_keywords(question)

            # BUGFIX: dedupe with dict.fromkeys instead of set() so the
            # stored JSONB is deterministic across runs (set ordering is
            # hash-randomized, which caused spurious row updates).
            merged_tokens = list(dict.fromkeys(tokens + [kw[0] for kw in keywords]))

            token_data = {
                "tokens": merged_tokens,
                "keywords": [{
                    "word": kw[0],
                    "weight": kw[1]
                } for kw in keywords[:10]],  # keep only the top-10 keywords
                "token_count": len(tokens)
            }

            logger.debug(f"问题: {question}")
            logger.debug(f"分词结果: {token_data}")

            return token_data

        except Exception as e:
            # Best-effort: a tokenization failure must not abort the import.
            logger.error(f"分词失败: {e}")
            return {"tokens": [], "keywords": [], "token_count": 0}

    def insert_qa_data(self, qa_pairs: List[Tuple[str, str]], category_id: str):
        """Upsert QA pairs into the database (keyed by question text).

        Args:
            qa_pairs (List[Tuple[str, str]]): Question/answer pairs.
            category_id (str): Category (reservoir) ID the pairs belong to.

        Raises:
            Exception: Re-raised after rollback on any insert failure.
        """
        insert_sql = """
        INSERT INTO qa_knowledge (question, question_tokens, answer, category_id)
        VALUES (%s, %s, %s, %s)
        ON CONFLICT (question) DO UPDATE SET
            question_tokens = EXCLUDED.question_tokens,
            answer = EXCLUDED.answer,
            category_id = EXCLUDED.category_id,
            updated_at = CURRENT_TIMESTAMP;
        """

        try:
            with self.conn.cursor() as cursor:
                for question, answer in qa_pairs:
                    token_data = self.tokenize_question(question)
                    answer_json = {"answer": answer}

                    cursor.execute(insert_sql, (
                        question,
                        Json(token_data),
                        Json(answer_json),
                        category_id
                    ))

                # Single commit per file keeps each file atomic.
                self.conn.commit()
                logger.info(f"成功插入或更新 {len(qa_pairs)} 条QA数据")

        except Exception as e:
            logger.error(f"插入数据失败: {e}")
            self.conn.rollback()
            raise

    def import_qa_file(self, file_path: str, category_id: str):
        """Import a single QA file (assumes an open connection; does not close it).

        Args:
            file_path (str): Path of the QA file.
            category_id (str): Category ID parsed from the file name.
        """
        try:
            qa_pairs = self.parse_qa_file(file_path)
            self.insert_qa_data(qa_pairs, category_id)
            logger.info(f"QA数据导入完成: {file_path}")

        except Exception as e:
            logger.error(f"导入失败: {e}")
            raise

    def import_qa_folder(self, folder_path: str):
        """Batch-import every ``*.txt`` QA file in a folder.

        Opens the connection once, ensures the table exists, imports each
        parsable file, and always closes the connection afterwards.

        Args:
            folder_path (str): Folder containing QA files.
        """
        if not os.path.exists(folder_path):
            logger.error(f"文件夹不存在: {folder_path}")
            return

        txt_files = [f for f in os.listdir(folder_path) if f.endswith('.txt')]

        if not txt_files:
            logger.warning(f"在 {folder_path} 中没有找到任何QA文件")
            return

        logger.info(f"找到 {len(txt_files)} 个QA文件")

        try:
            self.connect_db()
            self.create_table()

            success_count = 0
            for filename in txt_files:
                file_path = os.path.join(folder_path, filename)
                try:
                    reservoir_name, category_id = self.parse_filename(filename)

                    if not reservoir_name or not category_id:
                        # BUGFIX: message previously lost the file name.
                        logger.warning(f"跳过文件 {filename} - 无法解析水库信息")
                        continue

                    logger.info(f"处理水库: {reservoir_name} (ID: {category_id})")

                    self.import_qa_file(file_path, category_id)
                    success_count += 1

                except Exception as e:
                    # Per-file failures are logged and skipped so one bad
                    # file does not abort the whole batch.
                    # BUGFIX: message previously lost the file name.
                    logger.error(f"处理文件 {filename} 失败: {e}")

            logger.info(f"处理完成: {success_count}/{len(txt_files)} 个文件成功导入")

        except Exception as e:
            logger.error(f"批量导入过程中出错: {e}")
        finally:
            if self.conn:
                self.conn.close()
                logger.info("数据库连接已关闭")

    def query_by_tokens(self, tokens: List[str], limit: int = 5):
        """Find questions whose token array overlaps the given tokens.

        Ranks rows by how many of the supplied tokens appear in the stored
        ``question_tokens->'tokens'`` array.

        Args:
            tokens (List[str]): Tokens to match against.
            limit (int): Maximum number of rows to return.

        Returns:
            List: Rows of (question, question_tokens, answer, match_count);
            empty list on error or when ``tokens`` is empty.
        """
        # Empty input can never match anything (?| with an empty array is
        # always false) — skip the round trip entirely.
        if not tokens:
            return []

        try:
            self.connect_db()

            # ?| tests array overlap (uses the GIN index); the correlated
            # subquery counts exact matches for ranking.
            query_sql = """
            SELECT question, question_tokens, answer,
                   (
                       SELECT COUNT(*)
                       FROM jsonb_array_elements_text(question_tokens->'tokens') AS token
                       WHERE token = ANY(%s)
                   ) as match_count
            FROM qa_knowledge
            WHERE question_tokens->'tokens' ?| %s
            ORDER BY match_count DESC
            LIMIT %s
            """

            with self.conn.cursor() as cursor:
                cursor.execute(query_sql, (tokens, tokens, limit))
                results = cursor.fetchall()

                return results

        except Exception as e:
            logger.error(f"查询失败: {e}")
            return []
        finally:
            if self.conn:
                self.conn.close()


def main():
    """Entry point: batch-import the QA folder, then run a sample token query."""
    importer = QAImporter(DB_CONFIG)

    # Folder holding the <reservoir>_<id>.txt QA files.
    qa_folder = "../../qa_files"

    try:
        importer.import_qa_folder(qa_folder)

        # Smoke-test the tokenizer and the token-overlap query.
        test_question = "大伙房水库流域面积多少？"
        token_data = importer.tokenize_question(test_question)
        logger.info(f"测试问题: {test_question}")
        logger.info(f"分词结果: {token_data}")

        # tokenize_question returns a dict; the query needs the token list.
        results = importer.query_by_tokens(token_data.get('tokens', []))
        logger.info(f"查询到 {len(results)} 个相关问题")

        for i, row in enumerate(results, 1):
            question, _question_tokens, answer, match_count = row
            logger.info(f"结果 {i}: {question} (匹配度: {match_count})")
            logger.info(f"答案: {answer['answer'][:100]}...")

    except Exception as e:
        logger.error(f"程序执行失败: {e}")


if __name__ == "__main__":
    main()