# -*- coding: utf-8 -*-
"""
结构化检索器
基于结构化数据的检索方法
"""

import json
import os
import sqlite3
from typing import Dict, List, Any, Tuple, Optional, Union
from loguru import logger
from collections import defaultdict
import re
import pandas as pd

from .base_retriever import BaseRetriever, RetrievalResult
from ..ollama_client import ollama_client


class StructuredQuery:
    """A parsed, structured representation of a natural-language query."""

    def __init__(self, query_type: str, conditions: Dict[str, Any] = None,
                 aggregations: List[str] = None, sorting: Dict[str, str] = None):
        # query_type is one of: select, filter, aggregate, join
        self.query_type = query_type
        self.conditions = conditions or {}      # field -> required value
        self.aggregations = aggregations or []  # SQL aggregate expressions
        self.sorting = sorting or {}            # {'field': ..., 'direction': ...}
        self.limit = None                       # optional row cap, set by the parser

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the query into a plain dictionary."""
        return {
            'query_type': self.query_type,
            'conditions': self.conditions,
            'aggregations': self.aggregations,
            'sorting': self.sorting,
            'limit': self.limit,
        }


class StructuredRetriever(BaseRetriever):
    """结构化检索器"""
    
    def __init__(self, config: Dict[str, Any] = None):
        """
        Create a structured retriever.

        Args:
            config: optional configuration overrides (db_path, cache_path,
                    enable_sql_generation, enable_api_simulation, max_results)
        """
        super().__init__("StructuredRetriever", config)

        # Tunables pulled from config with defaults
        self.db_path = self.config.get("db_path", "data/structured_data.db")
        self.cache_path = self.config.get("cache_path", "data/structured_cache.json")
        self.enable_sql_generation = self.config.get("enable_sql_generation", True)
        self.enable_api_simulation = self.config.get("enable_api_simulation", True)
        self.max_results = self.config.get("max_results", 100)

        # sqlite3 connection, opened later in _initialize_database
        self.db_connection = None

        # Schema metadata filled in by _analyze_data_schema
        self.data_schema = {}
        self.indexed_fields = set()
        self.data_types = {}

        # Simulated REST endpoints (registered by _setup_api_endpoints)
        self.api_endpoints = {}

        # In-memory cache of parsed queries
        self.query_cache = {}

        # Canned SQL shapes for generated queries
        self.sql_templates = {
            'select_all': "SELECT * FROM {table} WHERE {conditions} ORDER BY {order} LIMIT {limit}",
            'select_fields': "SELECT {fields} FROM {table} WHERE {conditions} ORDER BY {order} LIMIT {limit}",
            'count': "SELECT COUNT(*) FROM {table} WHERE {conditions}",
            'aggregate': "SELECT {aggregations} FROM {table} WHERE {conditions} GROUP BY {group_by}",
            'search': "SELECT * FROM {table} WHERE {search_conditions} ORDER BY {relevance} LIMIT {limit}"
        }

        # Regexes used to classify incoming natural-language questions
        self.query_patterns = {
            'count': r'(多少|数量|总数|计数)',
            'filter': r'(筛选|过滤|条件|满足)',
            'sort': r'(排序|按照|根据.*排列)',
            'aggregate': r'(平均|总和|最大|最小|求和)',
            'compare': r'(比较|对比|差异)',
            'top': r'(前.*名|最.*的|排名)',
            'range': r'(范围|之间|到|从.*到)',
            'group': r'(分组|按.*分类|归类)'
        }

        logger.info("🗄️ 结构化检索器初始化完成")
    
    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Build the retriever's backing store from training data.

        Loads the query cache, (re)creates the sqlite database, derives the
        data schema, creates indices and registers the simulated endpoints.

        Args:
            data: training samples to load

        Returns:
            bool: True on success, False on any failure
        """
        try:
            logger.info(f"📊 开始初始化结构化检索器，数据量: {len(data)}")

            # Previously cached parse results, if the cache file exists
            self._load_cache()

            # Database first: schema analysis and indices depend on it
            self._initialize_database(data)
            self._analyze_data_schema(data)
            self._create_indices()

            # Register the simulated REST endpoints
            self._setup_api_endpoints()

            self.is_initialized = True
            logger.info("✅ 结构化检索器初始化成功")
            return True

        except Exception as e:
            logger.error(f"❌ 结构化检索器初始化异常: {e}")
            return False
    
    def _initialize_database(self, data: List[Dict[str, Any]]) -> None:
        """
        Create (or open) the sqlite database, build the schema and load data.

        Creates four tables — documents, categories, keywords and
        statistics — then bulk-inserts the training data and commits.

        Args:
            data: training samples to load

        Raises:
            Exception: re-raised after logging if any database step fails
        """
        try:
            # Make sure the directory holding the database file exists
            os.makedirs(os.path.dirname(self.db_path), exist_ok=True)

            self.db_connection = sqlite3.connect(self.db_path)
            # Row factory lets query results be addressed by column name
            self.db_connection.row_factory = sqlite3.Row

            cur = self.db_connection.cursor()

            # Main documents table
            cur.execute("""
                CREATE TABLE IF NOT EXISTS documents (
                    id INTEGER PRIMARY KEY,
                    question TEXT NOT NULL,
                    answer TEXT,
                    category INTEGER,
                    difficulty TEXT,
                    type TEXT,
                    keywords TEXT,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
            """)

            # Category lookup table
            cur.execute("""
                CREATE TABLE IF NOT EXISTS categories (
                    id INTEGER PRIMARY KEY,
                    name TEXT NOT NULL,
                    description TEXT
                )
            """)

            # Per-document keyword rows
            cur.execute("""
                CREATE TABLE IF NOT EXISTS keywords (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    document_id INTEGER,
                    keyword TEXT NOT NULL,
                    weight REAL DEFAULT 1.0,
                    FOREIGN KEY (document_id) REFERENCES documents (id)
                )
            """)

            # Per-document usage / quality statistics
            cur.execute("""
                CREATE TABLE IF NOT EXISTS statistics (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    document_id INTEGER,
                    view_count INTEGER DEFAULT 0,
                    like_count INTEGER DEFAULT 0,
                    difficulty_score REAL DEFAULT 0.0,
                    quality_score REAL DEFAULT 0.0,
                    FOREIGN KEY (document_id) REFERENCES documents (id)
                )
            """)

            # Load the training data into the freshly-created tables
            self._insert_data(data)

            self.db_connection.commit()
            logger.info("✅ 数据库初始化完成")

        except Exception as e:
            logger.error(f"❌ 数据库初始化失败: {e}")
            raise
    
    def _insert_data(self, data: List[Dict[str, Any]]) -> None:
        """
        Insert training data into the database.

        Rebuilds all four tables from scratch: existing rows are deleted
        first, then documents, per-document keywords and per-document
        statistics are re-inserted.

        Args:
            data: training samples; each item must have a 'question' key and
                  may carry 'id', 'answer', 'category', 'difficulty', 'type'
                  and 'keywords'.
        """
        cursor = self.db_connection.cursor()

        # Clear existing rows so repeated initialization stays idempotent
        for table in ("documents", "categories", "keywords", "statistics"):
            cursor.execute(f"DELETE FROM {table}")

        # Collect the distinct category values present in the data
        categories = {item['category'] for item in data if item.get('category')}

        # Insert categories; the category value itself is used as the primary
        # key so that documents.category joins directly against categories.id
        # (the previous enumerate() index was never used).
        for category in categories:
            cursor.execute(
                "INSERT INTO categories (id, name) VALUES (?, ?)",
                (category, f"分类{category}")
            )

        for item in data:
            doc_id = item.get('id')
            keywords = item.get('keywords')
            # Keywords are also kept on the document row as a JSON string
            keywords_str = json.dumps(keywords, ensure_ascii=False) if keywords else None

            cursor.execute("""
                INSERT INTO documents (id, question, answer, category, difficulty, type, keywords)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            """, (
                doc_id,
                item['question'],
                item.get('answer', ''),
                item.get('category'),
                item.get('difficulty', 'medium'),
                item.get('type', 'general'),
                keywords_str
            ))

            # One keywords-table row per keyword, linked back to the document
            if keywords:
                cursor.executemany(
                    "INSERT INTO keywords (document_id, keyword) VALUES (?, ?)",
                    [(doc_id, keyword) for keyword in keywords]
                )

            # Derived scores used by ranking / statistics queries
            cursor.execute("""
                INSERT INTO statistics (document_id, difficulty_score, quality_score)
                VALUES (?, ?, ?)
            """, (
                doc_id,
                self._calculate_difficulty_score(item.get('difficulty', 'medium')),
                self._calculate_quality_score(item)
            ))

        logger.info(f"✅ 插入 {len(data)} 条文档数据")
    
    def _calculate_difficulty_score(self, difficulty: str) -> float:
        """Map a difficulty label (easy/medium/hard, case-insensitive) to a numeric score."""
        label = difficulty.lower()
        if label == 'easy':
            return 1.0
        if label == 'hard':
            return 3.0
        # 'medium' and any unknown label fall back to the middle score
        return 2.0
    
    def _calculate_quality_score(self, item: Dict[str, Any]) -> float:
        """Score an item's quality from answer length and keyword count, capped at 10."""
        score = 5.0  # every item starts from the same baseline

        # Longer answers earn a bonus (tiered at 50 and 100 characters)
        answer_len = len(item.get('answer', ''))
        if answer_len > 100:
            score += 1.0
        elif answer_len > 50:
            score += 0.5

        # Up to +1.0 for keywords, 0.2 each
        score += min(len(item.get('keywords', [])) * 0.2, 1.0)

        # Never exceed the maximum score
        return min(score, 10.0)
    
    def _analyze_data_schema(self, data: List[Dict[str, Any]]) -> None:
        """
        Infer a simple schema description from the training data.

        Field SQL types are inferred from the FIRST sample only; results are
        stored on self.data_types / self.indexed_fields / self.data_schema.

        Args:
            data: training samples (no-op when empty)
        """
        if not data:
            return

        # Infer each field's SQL type from the first sample item.
        # NOTE(review): bool is a subclass of int, so boolean fields map to INTEGER.
        for field, value in data[0].items():
            if isinstance(value, int):
                inferred = 'INTEGER'
            elif isinstance(value, float):
                inferred = 'REAL'
            elif isinstance(value, list):
                inferred = 'JSON'
            else:
                inferred = 'TEXT'
            self.data_types[field] = inferred

        # Columns that receive database indices
        self.indexed_fields = {'id', 'category', 'difficulty', 'type'}

        # Logical layout of the three main tables
        self.data_schema = {
            'tables': {
                'documents': {
                    'fields': self.data_types,
                    'primary_key': 'id',
                    'indices': list(self.indexed_fields)
                },
                'categories': {
                    'fields': {'id': 'INTEGER', 'name': 'TEXT', 'description': 'TEXT'},
                    'primary_key': 'id'
                },
                'keywords': {
                    'fields': {'id': 'INTEGER', 'document_id': 'INTEGER', 'keyword': 'TEXT', 'weight': 'REAL'},
                    'primary_key': 'id',
                    'foreign_keys': {'document_id': 'documents.id'}
                }
            }
        }

        logger.info(f"📋 数据模式分析完成: {len(self.data_types)} 个字段")
    
    def _create_indices(self) -> None:
        """Create secondary indices on commonly-filtered columns (best effort)."""
        cur = self.db_connection.cursor()

        index_statements = (
            "CREATE INDEX IF NOT EXISTS idx_category ON documents(category)",
            "CREATE INDEX IF NOT EXISTS idx_difficulty ON documents(difficulty)",
            "CREATE INDEX IF NOT EXISTS idx_type ON documents(type)",
            "CREATE INDEX IF NOT EXISTS idx_keywords_document ON keywords(document_id)",
            "CREATE INDEX IF NOT EXISTS idx_keywords_keyword ON keywords(keyword)",
            "CREATE INDEX IF NOT EXISTS idx_statistics_document ON statistics(document_id)",
        )

        try:
            for statement in index_statements:
                cur.execute(statement)

            self.db_connection.commit()
            logger.info("✅ 数据库索引创建完成")

        except Exception as e:
            # Index creation failing is non-fatal: queries still work, just slower
            logger.warning(f"⚠️ 创建索引失败: {e}")
    
    def _setup_api_endpoints(self) -> None:
        """Register descriptions of the simulated REST endpoints."""
        endpoints: Dict[str, Dict[str, Any]] = {}
        endpoints['/documents'] = {
            'methods': ['GET', 'POST'],
            'parameters': ['category', 'difficulty', 'type', 'limit', 'offset'],
            'description': '获取文档列表'
        }
        endpoints['/documents/{id}'] = {
            'methods': ['GET'],
            'parameters': ['id'],
            'description': '获取特定文档'
        }
        endpoints['/search'] = {
            'methods': ['GET', 'POST'],
            'parameters': ['q', 'category', 'limit'],
            'description': '搜索文档'
        }
        endpoints['/categories'] = {
            'methods': ['GET'],
            'parameters': ['limit'],
            'description': '获取分类列表'
        }
        endpoints['/statistics'] = {
            'methods': ['GET'],
            'parameters': ['document_id', 'metric'],
            'description': '获取统计信息'
        }
        self.api_endpoints = endpoints

        logger.info(f"🔗 API端点设置完成: {len(self.api_endpoints)} 个端点")
    
    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Answer a question through the structured-retrieval pipeline.

        Pipeline: parse the question into a StructuredQuery, execute it
        against the database, optionally add simulated-API results,
        post-process the hits, then generate the final answer.

        Args:
            question: natural-language question
            top_k: number of retrieved documents to keep in the result

        Returns:
            RetrievalResult: answer plus metadata; an empty result when the
            retriever is uninitialized or the pipeline raises.
        """
        if not self.is_initialized:
            logger.error("❌ 结构化检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)

        try:
            def _run_pipeline():
                # Parse -> query -> (optional) API simulation -> post-process
                parsed = self._parse_query(question)
                hits = self._execute_structured_query(parsed)

                if self.enable_api_simulation:
                    hits.extend(self._simulate_api_query(question, parsed))

                ranked = self._post_process_results(hits, question)
                answer, confidence = self._generate_structured_answer(question, ranked)
                return answer, confidence, ranked[:top_k]

            # _measure_time wraps the pipeline and reports wall-clock time
            (answer, confidence, docs), elapsed = self._measure_time(_run_pipeline)

            # Count non-empty answers as successes
            if answer:
                self.success_count += 1

            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=elapsed,
                retrieved_docs=docs,
                metadata={
                    'method': 'structured',
                    'query_type': getattr(self, '_last_query_type', 'unknown'),
                    'sql_generated': self.enable_sql_generation,
                    'api_simulated': self.enable_api_simulation
                }
            )

        except Exception as e:
            logger.error(f"❌ 结构化检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)
    
    def _parse_query(self, question: str) -> StructuredQuery:
        """
        Turn a natural-language question into a StructuredQuery.

        Results are memoized in self.query_cache, keyed by the question's
        hash, so repeated questions skip re-parsing within this process.

        Args:
            question: natural-language question

        Returns:
            StructuredQuery: parsed type, conditions, aggregations, sorting
            and limit
        """
        # Return the memoized parse if we've seen this exact question
        cache_key = f"parse_{hash(question)}"
        cached = self.query_cache.get(cache_key)
        if cached is not None:
            return cached

        # Classify the question; remembered for retrieve() metadata
        query_type = self._detect_query_type(question)
        self._last_query_type = query_type

        # Extraction order: conditions, aggregations, sorting (as before)
        parsed = StructuredQuery(
            query_type=query_type,
            conditions=self._extract_conditions(question),
            aggregations=self._extract_aggregations(question),
            sorting=self._extract_sorting(question)
        )
        parsed.limit = self._extract_limit(question)

        self.query_cache[cache_key] = parsed

        logger.info(f"🔍 查询解析: {query_type} - {parsed.conditions}")
        return parsed
    
    def _detect_query_type(self, question: str) -> str:
        """
        Classify the question into one of the configured query types.

        Args:
            question: natural-language question

        Returns:
            str: the first matching key of self.query_patterns (checked in
            dict order), or 'select' when nothing matches
        """
        lowered = question.lower()

        for qtype, pattern in self.query_patterns.items():
            if re.search(pattern, lowered):
                return qtype

        # Nothing matched: treat it as a plain select
        return 'select'
    
    def _extract_conditions(self, question: str) -> Dict[str, Any]:
        """
        Pull filter conditions out of a natural-language question.

        Args:
            question: natural-language question

        Returns:
            Dict[str, Any]: any of 'category' (int), 'difficulty', 'type'
            and 'keywords' (list) that could be detected
        """
        conditions: Dict[str, Any] = {}

        # Category: the first numeric category reference wins
        for pattern in (r'分类(\d+)', r'类别(\d+)', r'第(\d+)类'):
            m = re.search(pattern, question)
            if m:
                conditions['category'] = int(m.group(1))
                break

        # Difficulty phrases -> canonical labels
        for pattern, label in (
            (r'简单|容易|基础', 'easy'),
            (r'中等|一般|普通', 'medium'),
            (r'困难|复杂|高级', 'hard'),
        ):
            if re.search(pattern, question):
                conditions['difficulty'] = label
                break

        # Question-type phrases -> canonical labels
        for pattern, qtype in (
            (r'选择题|单选|多选', 'choice'),
            (r'填空题|填空', 'fill'),
            (r'问答题|简答', 'qa'),
            (r'编程题|代码', 'code'),
        ):
            if re.search(pattern, question):
                conditions['type'] = qtype
                break

        # Free-text keywords for LIKE matching
        keywords = self._extract_query_keywords(question)
        if keywords:
            conditions['keywords'] = keywords

        return conditions
    
    def _extract_query_keywords(self, question: str) -> List[str]:
        """
        Extract up to five content-bearing tokens from the question.

        NOTE(review): \\w+ does not segment unspaced Chinese text, so such a
        question often comes back as one long token; the stop-word filter
        below only removes exact whole-token matches.

        Args:
            question: natural-language question

        Returns:
            List[str]: at most five tokens, each longer than one character
        """
        # Interrogative / query words that carry no search value
        stop_words = {'什么', '如何', '怎么', '为什么', '哪些', '多少', '查询', '搜索', '找到'}

        tokens = re.findall(r'\b\w+\b', question)
        selected = [t for t in tokens if len(t) > 1 and t not in stop_words]

        # Cap the number of keywords
        return selected[:5]
    
    def _extract_aggregations(self, question: str) -> List[str]:
        """
        Map aggregation phrasing in the question to SQL aggregate expressions.

        Args:
            question: natural-language question

        Returns:
            List[str]: matching aggregate expressions, in the fixed order
            below; a question may trigger several at once
        """
        patterns = (
            (r'总数|数量|多少个', 'COUNT(*)'),
            (r'平均|均值', 'AVG(difficulty_score)'),
            (r'最大|最高', 'MAX(quality_score)'),
            (r'最小|最低', 'MIN(quality_score)'),
            (r'总和|求和', 'SUM(view_count)'),
        )

        return [func for pattern, func in patterns if re.search(pattern, question)]
    
    def _extract_sorting(self, question: str) -> Dict[str, str]:
        """
        Determine the ORDER BY field and direction implied by the question.

        Args:
            question: natural-language question

        Returns:
            Dict[str, str]: {'field': column_name, 'direction': 'ASC'|'DESC'}
        """
        # First sort-field phrase that matches wins; fall back to id
        if re.search(r'按.*难度.*排序', question):
            field = 'difficulty_score'
        elif re.search(r'按.*质量.*排序', question):
            field = 'quality_score'
        elif re.search(r'按.*时间.*排序', question):
            field = 'created_at'
        else:
            field = 'id'

        # Descending only when the question explicitly asks for it
        direction = 'DESC' if re.search(r'降序|从高到低|最.*的', question) else 'ASC'

        return {'field': field, 'direction': direction}
    
    def _extract_limit(self, question: str) -> Optional[int]:
        """
        Extract a result-count limit from the question, if any.

        Explicit limit phrases ("前N", "N个", "N条") take precedence;
        otherwise the first number in the question is used when it falls
        in the 1..100 range.

        Args:
            question: natural-language question

        Returns:
            Optional[int]: the limit, or None when no usable number exists
        """
        for pattern in (r'前(\d+)', r'最.*的(\d+)', r'(\d+)个', r'(\d+)条'):
            m = re.search(pattern, question)
            if m:
                return int(m.group(1))

        # No explicit limit phrase: fall back to the first reasonable number
        numbers = re.findall(r'(\d+)', question)
        if numbers:
            candidate = int(numbers[0])
            if 1 <= candidate <= 100:
                return candidate

        return None
    
    def _execute_structured_query(self, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Dispatch a parsed query to its type-specific executor.

        count/aggregate/filter get dedicated handlers; every other query
        type falls through to the generic select executor.

        Args:
            structured_query: the parsed query

        Returns:
            List[Dict[str, Any]]: executor results, or [] on any error
        """
        handlers = {
            'count': self._execute_count_query,
            'aggregate': self._execute_aggregate_query,
            'filter': self._execute_filter_query,
        }
        try:
            handler = handlers.get(structured_query.query_type, self._execute_select_query)
            return handler(structured_query)

        except Exception as e:
            logger.error(f"❌ 执行结构化查询失败: {e}")
            return []
    
    def _execute_select_query(self, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Run a SELECT over documents (left-joined with statistics).

        Conditions become a parameterized WHERE clause: the special
        'keywords' condition expands into LIKE matches over question/answer;
        every other field is an exact match. Sorting and limit come from the
        parsed query (defaults: id ASC, 10 rows).

        Args:
            structured_query: the parsed query

        Returns:
            List[Dict[str, Any]]: matching rows as plain dicts with a 'score'
        """
        cursor = self.db_connection.cursor()

        # Build the WHERE clause and its bound parameters
        where_conditions = []
        params = []

        for field, value in structured_query.conditions.items():
            if field == 'keywords':
                # Every keyword may match either the question or the answer
                keyword_clauses = []
                for keyword in value:
                    keyword_clauses.append("(question LIKE ? OR answer LIKE ?)")
                    params.extend([f"%{keyword}%", f"%{keyword}%"])
                if keyword_clauses:
                    where_conditions.append(f"({' OR '.join(keyword_clauses)})")
            else:
                # Scalar column: exact equality
                where_conditions.append(f"{field} = ?")
                params.append(value)

        where_clause = " AND ".join(where_conditions) if where_conditions else "1=1"

        # ORDER BY from the parsed sorting spec (default id ASC)
        if structured_query.sorting:
            sort_field = structured_query.sorting.get('field', 'id')
            sort_direction = structured_query.sorting.get('direction', 'ASC')
            order_clause = f"{sort_field} {sort_direction}"
        else:
            order_clause = "id ASC"

        # LIMIT: parsed limit or a small default
        limit_clause = structured_query.limit or 10

        sql = f"""
            SELECT d.*, s.difficulty_score, s.quality_score, s.view_count
            FROM documents d
            LEFT JOIN statistics s ON d.id = s.document_id
            WHERE {where_clause}
            ORDER BY {order_clause}
            LIMIT ?
        """
        params.append(limit_clause)

        cursor.execute(sql, params)

        # Normalize rows into plain dicts with a ranking score
        results = [
            {
                'id': row['id'],
                'question': row['question'],
                'answer': row['answer'],
                'category': row['category'],
                'difficulty': row['difficulty'],
                'type': row['type'],
                'difficulty_score': row['difficulty_score'],
                'quality_score': row['quality_score'],
                'view_count': row['view_count'] or 0,
                'score': row['quality_score'] or 5.0  # fallback ranking score
            }
            for row in cursor.fetchall()
        ]

        logger.info(f"📊 选择查询返回 {len(results)} 条结果")
        return results
    
    def _execute_count_query(self, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Count documents matching the scalar conditions.

        Keyword conditions are ignored here; only exact-match fields
        constrain the count.

        Args:
            structured_query: the parsed query

        Returns:
            List[Dict[str, Any]]: a one-element list wrapping the count
        """
        cursor = self.db_connection.cursor()

        # Scalar equality filters only
        clauses = []
        params = []
        for field, value in structured_query.conditions.items():
            if field != 'keywords':
                clauses.append(f"{field} = ?")
                params.append(value)

        where_clause = " AND ".join(clauses) if clauses else "1=1"

        sql = f"SELECT COUNT(*) as count FROM documents WHERE {where_clause}"
        cursor.execute(sql, params)

        row = cursor.fetchone()
        count = row['count'] if row else 0

        # Wrap the number in a pseudo-document so callers get a uniform shape
        result = {
            'id': 'count_result',
            'question': '统计查询结果',
            'answer': f"满足条件的记录数量: {count}",
            'count': count,
            'score': 1.0
        }

        logger.info(f"📊 计数查询结果: {count}")
        return [result]
    
    def _execute_aggregate_query(self, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Run the requested SQL aggregates over documents joined with statistics.

        Keyword conditions are ignored; scalar conditions constrain the rows.
        Falls back to COUNT(*) when no aggregate was requested.

        Args:
            structured_query: the parsed query

        Returns:
            List[Dict[str, Any]]: a one-element list wrapping the aggregates
        """
        cursor = self.db_connection.cursor()

        # Aggregates to compute (default: plain row count)
        agg_fields = structured_query.aggregations or ['COUNT(*)']
        agg_clause = ', '.join(agg_fields)

        # Scalar equality filters only
        clauses = []
        params = []
        for field, value in structured_query.conditions.items():
            if field != 'keywords':
                clauses.append(f"{field} = ?")
                params.append(value)

        where_clause = " AND ".join(clauses) if clauses else "1=1"

        sql = f"""
            SELECT {agg_clause}
            FROM documents d
            LEFT JOIN statistics s ON d.id = s.document_id
            WHERE {where_clause}
        """

        cursor.execute(sql, params)
        agg_result = cursor.fetchone()

        # Render the aggregate values into a human-readable answer
        result_text = "聚合查询结果: "
        if agg_result:
            result_text += ", ".join(str(value) for value in agg_result)

        result = {
            'id': 'aggregate_result',
            'question': '聚合查询结果',
            'answer': result_text,
            'aggregation_result': dict(zip(agg_fields, agg_result)) if agg_result else {},
            'score': 1.0
        }

        logger.info(f"📊 聚合查询完成")
        return [result]
    
    def _execute_filter_query(self, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Run a filter-style query.

        Currently identical to a select: the filtering is expressed entirely
        through the conditions already parsed into structured_query.

        Args:
            structured_query: the parsed query

        Returns:
            List[Dict[str, Any]]: matching rows from the select executor
        """
        return self._execute_select_query(structured_query)
    
    def _simulate_api_query(self, question: str, structured_query: StructuredQuery) -> List[Dict[str, Any]]:
        """
        Route the question to the matching simulated API endpoints.

        Routing is keyword-based on the raw question text; the
        structured_query argument is accepted for interface symmetry.

        Args:
            question: natural-language question
            structured_query: the parsed query (currently unused here)

        Returns:
            List[Dict[str, Any]]: combined results from all triggered
            endpoints; best-effort, so [] on any failure
        """
        api_results = []

        try:
            lowered = question.lower()

            # /search endpoint: keyword search over questions and answers
            if 'search' in lowered:
                api_results.extend(self._simulate_search_api(question))

            # /statistics endpoint: corpus-wide aggregates
            if 'statistics' in lowered or 'stats' in lowered:
                api_results.extend(self._simulate_statistics_api(question))

            # /categories endpoint: per-category breakdown
            if 'category' in lowered or '分类' in question:
                api_results.extend(self._simulate_categories_api())

            logger.info(f"🔗 API模拟返回 {len(api_results)} 条结果")

        except Exception as e:
            # API simulation is best-effort: log and return what we have
            logger.warning(f"⚠️ API模拟失败: {e}")

        return api_results
    
    def _simulate_search_api(self, question: str) -> List[Dict[str, Any]]:
        """
        Simulate the /search API endpoint.

        LIKE-matches extracted keywords against question and answer text,
        best quality first.

        Args:
            question: natural-language question

        Returns:
            List[Dict[str, Any]]: up to five matching documents, or [] when
            no keywords could be extracted
        """
        keywords = self._extract_query_keywords(question)
        if not keywords:
            return []

        cursor = self.db_connection.cursor()

        # Each keyword may hit either the question or the answer column
        like_clauses = []
        params = []
        for keyword in keywords[:3]:  # at most three keywords
            like_clauses.append("(question LIKE ? OR answer LIKE ?)")
            params.extend([f"%{keyword}%", f"%{keyword}%"])

        search_clause = " OR ".join(like_clauses)

        sql = f"""
            SELECT d.*, s.quality_score
            FROM documents d
            LEFT JOIN statistics s ON d.id = s.document_id
            WHERE {search_clause}
            ORDER BY s.quality_score DESC
            LIMIT 5
        """

        cursor.execute(sql, params)

        return [
            {
                'id': row['id'],
                'question': row['question'],
                'answer': row['answer'],
                'score': row['quality_score'] or 5.0,
                'api_endpoint': '/search',
                'search_keywords': keywords
            }
            for row in cursor.fetchall()
        ]
    
    def _simulate_statistics_api(self, question: str) -> List[Dict[str, Any]]:
        """
        Simulate the /statistics API endpoint.

        Computes corpus-wide statistics (document count plus difficulty /
        quality aggregates) and returns them as a single pseudo-document.

        Args:
            question: the user question (currently unused; the endpoint
                      always returns global statistics)

        Returns:
            List[Dict[str, Any]]: a one-element list with the statistics
            record, or [] when the query yields no row
        """
        cursor = self.db_connection.cursor()

        # Corpus-wide aggregates over documents joined with statistics
        stats_sql = """
            SELECT 
                COUNT(*) as total_documents,
                AVG(difficulty_score) as avg_difficulty,
                AVG(quality_score) as avg_quality,
                MAX(quality_score) as max_quality,
                MIN(quality_score) as min_quality
            FROM documents d
            LEFT JOIN statistics s ON d.id = s.document_id
        """

        cursor.execute(stats_sql)
        stats_row = cursor.fetchone()

        if stats_row:
            # SQL aggregates return NULL over an empty/unmatched set; coalesce
            # to 0.0 so the :.2f formats below cannot crash on None.
            avg_difficulty = stats_row['avg_difficulty'] or 0.0
            avg_quality = stats_row['avg_quality'] or 0.0
            max_quality = stats_row['max_quality'] or 0.0
            min_quality = stats_row['min_quality'] or 0.0
            stats_text = f"""
统计信息:
- 总文档数: {stats_row['total_documents']}
- 平均难度: {avg_difficulty:.2f}
- 平均质量: {avg_quality:.2f}
- 最高质量: {max_quality:.2f}
- 最低质量: {min_quality:.2f}
"""

            result = {
                'id': 'statistics_api',
                'question': '系统统计信息',
                'answer': stats_text,
                'statistics': dict(stats_row),
                'score': 1.0,
                'api_endpoint': '/statistics'
            }

            return [result]

        return []
    
    def _simulate_categories_api(self) -> List[Dict[str, Any]]:
        """
        Simulate the /categories API endpoint.

        Returns per-category document counts and average quality, busiest
        category first.

        Returns:
            List[Dict[str, Any]]: a one-element list with the category
            breakdown, or [] when there are no categories
        """
        cursor = self.db_connection.cursor()

        # Per-category document count and average quality, busiest first
        categories_sql = """
            SELECT 
                c.id,
                c.name,
                COUNT(d.id) as document_count,
                AVG(s.quality_score) as avg_quality
            FROM categories c
            LEFT JOIN documents d ON c.id = d.category
            LEFT JOIN statistics s ON d.id = s.document_id
            GROUP BY c.id, c.name
            ORDER BY document_count DESC
        """

        cursor.execute(categories_sql)
        rows = cursor.fetchall()

        if rows:
            categories_text = "分类信息:\n"
            for row in rows:
                # AVG is NULL for a category with no joined statistics rows;
                # substitute 0.0 so the :.2f format cannot crash on None.
                avg_quality = row['avg_quality'] or 0.0
                categories_text += f"- 分类{row['id']}: {row['document_count']}个文档, 平均质量: {avg_quality:.2f}\n"

            result = {
                'id': 'categories_api',
                'question': '分类统计信息',
                'answer': categories_text,
                'categories': [dict(row) for row in rows],
                'score': 1.0,
                'api_endpoint': '/categories'
            }

            return [result]

        return []
    
    def _post_process_results(self, results: List[Dict[str, Any]], question: str) -> List[Dict[str, Any]]:
        """
        后处理结果
        
        Args:
            results: 原始结果
            question: 查询问题
        
        Returns:
            List[Dict[str, Any]]: 处理后的结果
        """
        if not results:
            return results
        
        # 去重
        seen_ids = set()
        unique_results = []
        for result in results:
            result_id = result.get('id')
            if result_id not in seen_ids:
                seen_ids.add(result_id)
                unique_results.append(result)
        
        # 重新评分
        for result in unique_results:
            # 基础分数
            base_score = result.get('score', 0.0)
            
            # 查询相关性分数
            relevance_score = self._calculate_relevance_score(question, result)
            
            # 质量分数
            quality_score = result.get('quality_score', 5.0) / 10.0  # 归一化
            
            # 综合分数
            final_score = base_score * 0.4 + relevance_score * 0.4 + quality_score * 0.2
            result['final_score'] = final_score
        
        # 按最终分数排序
        unique_results.sort(key=lambda x: x.get('final_score', 0), reverse=True)
        
        return unique_results
    
    def _calculate_relevance_score(self, question: str, result: Dict[str, Any]) -> float:
        """
        计算相关性分数
        
        Args:
            question: 查询问题
            result: 结果项
        
        Returns:
            float: 相关性分数
        """
        question_words = set(question.lower().split())
        result_text = f"{result.get('question', '')} {result.get('answer', '')}"
        result_words = set(result_text.lower().split())
        
        if not question_words or not result_words:
            return 0.0
        
        intersection = question_words.intersection(result_words)
        union = question_words.union(result_words)
        
        return len(intersection) / len(union) if union else 0.0
    
    def _generate_structured_answer(self, question: str, results: List[Dict[str, Any]]) -> Tuple[str, float]:
        """
        生成结构化答案
        
        Args:
            question: 查询问题
            results: 查询结果
        
        Returns:
            Tuple[str, float]: (答案, 置信度)
        """
        if not results:
            # 使用LLM直接回答
            answer = ollama_client.answer_question(question)
            return answer, 0.3
        
        # 检查是否是统计查询
        if any(result.get('id') in ['count_result', 'aggregate_result', 'statistics_api'] for result in results):
            # 直接返回统计结果
            stat_result = next((r for r in results if r.get('id') in ['count_result', 'aggregate_result', 'statistics_api']), None)
            if stat_result:
                return stat_result.get('answer', ''), 0.9
        
        # 常规问答
        best_result = results[0]
        answer = best_result.get('answer', '')
        confidence = min(best_result.get('final_score', 0.0), 1.0)
        
        # 如果置信度较低，使用LLM增强
        if confidence < 0.6:
            context_parts = []
            for result in results[:3]:
                if result.get('answer'):
                    context_parts.append(result['answer'])
            
            context = "\n".join(context_parts)
            enhanced_answer = ollama_client.answer_question(question, context)
            
            if enhanced_answer and len(enhanced_answer) > len(answer):
                answer = enhanced_answer
                confidence = min(confidence + 0.2, 1.0)
        
        return answer, confidence
    
    def _load_cache(self) -> None:
        """
        加载缓存
        """
        try:
            if os.path.exists(self.cache_path):
                with open(self.cache_path, 'r', encoding='utf-8') as f:
                    self.query_cache = json.load(f)
                logger.info(f"📥 加载结构化查询缓存: {len(self.query_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 加载结构化查询缓存失败: {e}")
            self.query_cache = {}
    
    def _save_cache(self) -> None:
        """
        Persist the structured-query cache to ``self.cache_path`` as JSON.

        Values exposing a ``to_dict`` method are converted before dumping;
        everything else is written as-is. Failures are logged as warnings
        and never raised (best-effort persistence).
        """
        try:
            # os.makedirs('') raises FileNotFoundError, so only create the
            # directory when the cache path actually contains one. Previously
            # a bare filename like "cache.json" made the whole save fail.
            cache_dir = os.path.dirname(self.cache_path)
            if cache_dir:
                os.makedirs(cache_dir, exist_ok=True)
            # Keep only JSON-serializable cache entries.
            serializable_cache = {}
            for key, value in self.query_cache.items():
                if hasattr(value, 'to_dict'):
                    serializable_cache[key] = value.to_dict()
                else:
                    serializable_cache[key] = value
            
            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(serializable_cache, f, ensure_ascii=False, indent=2)
            logger.info(f"💾 保存结构化查询缓存: {len(serializable_cache)} 条记录")
        except Exception as e:
            logger.warning(f"⚠️ 保存结构化查询缓存失败: {e}")
    
    def __del__(self):
        """析构函数，关闭数据库连接"""
        if hasattr(self, 'db_connection') and self.db_connection:
            self.db_connection.close()


if __name__ == "__main__":
    # Smoke-test the structured retriever end to end.
    test_config = {
        "db_path": "data/test_structured.db",
        "enable_sql_generation": True,
        "enable_api_simulation": True,
        "max_results": 10
    }

    retriever = StructuredRetriever(test_config)

    # Sample documents used to populate the test database.
    sample_documents = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": 1,
            "difficulty": "medium",
            "type": "qa",
            "keywords": ["机器学习", "人工智能", "算法"]
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": 1,
            "difficulty": "hard",
            "type": "qa",
            "keywords": ["深度学习", "神经网络", "学习"]
        }
    ]

    # Only run queries when initialization succeeded.
    if retriever.initialize(sample_documents):
        # Exercise several query types: filter, count, sort, and search.
        sample_queries = [
            "查询分类1的所有问题",
            "有多少个困难的问题？",
            "按质量排序的前3个问题",
            "搜索机器学习相关的问题"
        ]

        for sample_query in sample_queries:
            retrieval = retriever.retrieve(sample_query)
            print(f"\n查询: {sample_query}")
            print(f"结果: {retrieval.answer}")
            print(f"置信度: {retrieval.confidence}")

        # Dump aggregate performance counters at the end.
        print(f"\n性能统计: {retriever.get_performance_stats()}")