# -*- coding: utf-8 -*-
"""
PostgreSQL 问答数据存储模块

基于 PostgreSQL + pgvector 的问答数据存储实现。
支持向量相似度搜索、关键词检索和传统的 CRUD 操作。
"""
import json
import os
from typing import Dict, List, Optional, Union, Any

from loguru import logger
from sqlalchemy import text, or_

from config.database_config import DatabaseConfig
from .models import (
    QARecord, Base,
    TABLE_MAPPING, FILE_TO_TABLE
)
from .qa_storage import QAStorage  # 继承原有接口


class PostgresQAStorage(QAStorage):
    """PostgreSQL multi-table QA storage manager.

    Stores question/answer records across one or more pgvector-enabled
    tables and supports vector similarity search, keyword (ILIKE) search,
    and basic statistics. Table routing is driven by ``TABLE_MAPPING`` /
    ``FILE_TO_TABLE`` from the models module.
    """

    def __init__(self,
                 table_name: Optional[str] = None,
                 knowledge_bases: Optional[List[str]] = None,
                 data_dir: str = "data/raw",
                 embedding_function=None,
                 force_reload: bool = False):
        """
        Initialize the PostgreSQL storage manager.

        Args:
            table_name: Restrict operations to a single table (e.g. 'logic_qa').
            knowledge_bases: Knowledge-base file names to load
                (e.g. ['logic_qa.json', 'flood_qa.json']).
            data_dir: Directory containing the knowledge-base JSON files.
            embedding_function: Callable mapping text -> embedding vector;
                when None, vector columns are left empty and vector search
                is unavailable.
            force_reload: When True, wipe and re-import tables that already
                contain data.
        """
        # NOTE(review): QAStorage.__init__ is intentionally not invoked here;
        # confirm the base class requires no setup of its own.
        self.session_factory = DatabaseConfig.create_session_factory()
        self.embedding_function = embedding_function
        self.data_dir = data_dir
        self.knowledge_bases = knowledge_bases

        # Resolve the set of tables this instance operates on. Priority:
        # explicit table name > tables derived from knowledge-base file
        # names > every known table.
        if table_name:
            self.active_tables = [table_name]
        elif knowledge_bases:
            self.active_tables = [
                FILE_TO_TABLE.get(kb, kb.replace('.json', ''))
                for kb in knowledge_bases
            ]
        else:
            self.active_tables = list(TABLE_MAPPING.keys())

        # Create extension/tables, then import knowledge-base data.
        self._init_database()
        self._load_knowledge_bases(force_reload=force_reload)

        logger.info(f"PostgreSQL 存储初始化完成，活跃表: {self.active_tables}")

    def _get_table_class(self, table_name: str):
        """Return the ORM model class for *table_name* (QARecord fallback)."""
        return TABLE_MAPPING.get(table_name, QARecord)

    def _load_knowledge_bases(self, force_reload: bool = False):
        """
        Import knowledge-base JSON files into their database tables.

        Tables that already contain rows are skipped unless *force_reload*
        is set, in which case they are emptied and re-imported. Failures
        for one file are logged and do not abort the remaining files.

        Args:
            force_reload: Re-import even when the target table has data.
        """
        if not self.knowledge_bases:
            logger.info("未指定知识库文件，跳过数据加载")
            return

        logger.info(f"开始加载知识库: {self.knowledge_bases}")

        for kb_file in self.knowledge_bases:
            file_path = os.path.join(self.data_dir, kb_file)
            if not os.path.exists(file_path):
                logger.warning(f"知识库文件不存在: {file_path}")
                continue

            table_name = FILE_TO_TABLE.get(kb_file, kb_file.replace('.json', ''))
            table_class = self._get_table_class(table_name)

            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    qa_data_list = json.load(f)

                with self.session_factory() as session:
                    # Decide whether to skip, wipe, or load fresh.
                    existing_count = session.query(table_class).count()
                    if existing_count > 0 and not force_reload:
                        logger.info(f"表 {table_name} 中已有 {existing_count} 条记录，跳过加载")
                        continue
                    elif existing_count > 0 and force_reload:
                        logger.info(f"强制重新加载：清空表 {table_name} 中的 {existing_count} 条记录")
                        session.query(table_class).delete()
                        session.commit()

                    # Build one ORM record per QA entry; 'key', 'question'
                    # and 'answer' are required fields of the JSON schema.
                    for qa_data in qa_data_list:
                        record = table_class(
                            key=qa_data['key'],
                            question=qa_data['question'],
                            answer=qa_data['answer'],
                            keywords=qa_data.get('keywords', []),
                            category=qa_data.get('category', table_name),
                            source=qa_data.get('source', kb_file)
                        )

                        # Best-effort embedding generation: a failure leaves
                        # the vector columns NULL but keeps the record.
                        if self.embedding_function:
                            try:
                                question_vector = self.embedding_function(qa_data['question'])
                                answer_vector = self.embedding_function(qa_data['answer'])
                                record.question_vector = question_vector
                                record.answer_vector = answer_vector
                            except Exception as e:
                                logger.warning(f"生成向量嵌入失败: {e}")

                        session.add(record)

                    session.commit()
                    loaded_count = session.query(table_class).count()
                    logger.info(f"知识库 {kb_file} 加载完成，表 {table_name} 共 {loaded_count} 条记录")

            except Exception as e:
                logger.error(f"加载知识库 {kb_file} 失败: {e}")

    def get_all_qa(self, table_name: Optional[str] = None) -> List[Dict]:
        """
        Fetch every QA record from one table or from all active tables.

        Args:
            table_name: Restrict the fetch to a single table; None queries
                all active tables.

        Returns:
            List of record dicts, each annotated with a 'table_name' key.
        """
        tables_to_query = [table_name] if table_name else self.active_tables
        all_results = []

        with self.session_factory() as session:
            for table in tables_to_query:
                table_class = self._get_table_class(table)
                records = session.query(table_class).all()
                table_results = [record.to_dict() for record in records]
                # Tag each result with its origin table.
                for result in table_results:
                    result['table_name'] = table
                all_results.extend(table_results)

        logger.debug(f"获取问答数据，共 {len(all_results)} 条")
        return all_results

    def vector_search(self, query_text: str, table_name: Optional[str] = None,
                      limit: int = 10) -> List[Dict]:
        """
        Cosine-similarity search over question vectors.

        Each active table contributes up to *limit* nearest candidates;
        candidates are then merged and re-ranked by cosine distance so
        that the global top-*limit* is correct across tables (the previous
        implementation truncated the concatenated lists without re-ranking,
        which favored whichever table happened to be queried first).

        Args:
            query_text: Natural-language query to embed and match.
            table_name: Restrict the search to a single table; None searches
                all active tables.
            limit: Maximum number of results to return.

        Returns:
            Record dicts ordered by ascending cosine distance, each with a
            'table_name' key; empty list when no embedding function is set
            or the search fails.
        """
        if not self.embedding_function:
            logger.warning("未配置嵌入函数，无法进行向量搜索")
            return []

        tables_to_search = [table_name] if table_name else self.active_tables

        try:
            query_vector = self.embedding_function(query_text)
            scored = []  # (distance, result_dict) pairs across all tables

            with self.session_factory() as session:
                for table in tables_to_search:
                    table_class = self._get_table_class(table)
                    distance_expr = table_class.question_vector.cosine_distance(query_vector)
                    # Fetch the distance alongside the record so results
                    # from different tables can be compared.
                    rows = session.query(table_class, distance_expr.label('distance')).filter(
                        table_class.question_vector.isnot(None)
                    ).order_by(distance_expr).limit(limit).all()

                    for record, distance in rows:
                        result = record.to_dict()
                        result['table_name'] = table
                        scored.append((distance, result))

            # Global re-rank, then keep the overall top-*limit*.
            scored.sort(key=lambda pair: pair[0])
            all_results = [result for _, result in scored[:limit]]

            logger.debug(f"向量搜索完成，找到 {len(all_results)} 条记录")
            return all_results

        except Exception as e:
            logger.error(f"向量搜索失败: {e}")
            return []

    def search_by_keywords(self, keywords: Union[str, List[str]],
                           table_name: Optional[str] = None) -> List[Dict]:
        """
        Case-insensitive substring search in questions and answers.

        Args:
            keywords: A single keyword or a list of keywords; any match in
                question OR answer qualifies a record.
            table_name: Restrict the search to a single table; None searches
                all active tables.

        Returns:
            Matching record dicts, each annotated with a 'table_name' key.
        """
        if isinstance(keywords, str):
            keywords = [keywords]

        if not keywords:
            return []

        tables_to_search = [table_name] if table_name else self.active_tables
        all_results = []

        with self.session_factory() as session:
            for table in tables_to_search:
                table_class = self._get_table_class(table)
                conditions = []

                # ILIKE text search over both fields (fallback when vector
                # search is unavailable or insufficient).
                for keyword in keywords:
                    conditions.append(table_class.question.ilike(f'%{keyword}%'))
                    conditions.append(table_class.answer.ilike(f'%{keyword}%'))

                records = session.query(table_class).filter(or_(*conditions)).all()
                table_results = [record.to_dict() for record in records]
                for result in table_results:
                    result['table_name'] = table
                all_results.extend(table_results)

        logger.debug(f"关键词搜索完成，找到 {len(all_results)} 条记录")
        return all_results

    def get_table_statistics(self) -> Dict[str, Any]:
        """
        Per-table counts for all active tables.

        Returns:
            Mapping of table name to a dict with 'total_count',
            'vector_count' (rows with a question embedding) and
            'vector_coverage' (ratio, 0 for empty tables).
        """
        stats = {}

        with self.session_factory() as session:
            for table_name in self.active_tables:
                table_class = self._get_table_class(table_name)
                total_count = session.query(table_class).count()
                vector_count = session.query(table_class).filter(
                    table_class.question_vector.isnot(None)
                ).count()

                stats[table_name] = {
                    'total_count': total_count,
                    'vector_count': vector_count,
                    'vector_coverage': vector_count / total_count if total_count > 0 else 0
                }

        return stats

    def _init_database(self):
        """
        Create the pgvector extension and all mapped tables.

        Raises:
            Exception: Re-raised after logging when initialization fails,
                since the storage is unusable without its schema.
        """
        try:
            engine = DatabaseConfig.create_engine()

            # The vector extension must exist before any vector columns
            # can be created.
            with engine.connect() as conn:
                conn.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
                conn.commit()

            # Idempotent: only creates tables that do not yet exist.
            Base.metadata.create_all(engine)
            logger.info("数据库表结构初始化完成")

        except Exception as e:
            logger.error(f"数据库初始化失败: {e}")
            raise

    def get_statistics(self) -> Dict[str, Union[int, List[str]]]:
        """
        Aggregate statistics over the base QARecord table.

        NOTE(review): unlike get_table_statistics, this queries only the
        base ``QARecord`` model, not the per-table models in
        ``self.active_tables`` — confirm whether that is intentional for
        the inherited QAStorage interface.

        Returns:
            Dict with 'total_qa_count', 'vector_enabled_count',
            'categories' (distinct non-empty category values) and
            'vector_coverage' (ratio, 0 when the table is empty).
        """
        with self.session_factory() as session:
            total_count = session.query(QARecord).count()
            vector_count = session.query(QARecord).filter(
                QARecord.question_vector.isnot(None)
            ).count()

            # Distinct categories come back as 1-tuples; drop empty values.
            categories = session.query(QARecord.category).distinct().all()
            category_list = [cat[0] for cat in categories if cat[0]]

            return {
                "total_qa_count": total_count,
                "vector_enabled_count": vector_count,
                "categories": category_list,
                "vector_coverage": vector_count / total_count if total_count > 0 else 0
            }