from typing import Optional, List, Dict, Any
import numpy as np
from scorpio.core.common import get_logger
from scorpio.core.database import VectorDatabaseManager

# Module-level logger named after this module, per standard logging convention.
logger = get_logger(__name__)

class QueryProcessor:
    """Search service over a pgvector-enabled PostgreSQL database.

    Provides vector similarity search, hybrid (vector + keyword) search,
    related-document discovery and query-performance analytics on top of the
    ``chunks``, ``documents``, ``query_history`` and
    ``query_document_relations`` tables.
    """

    def __init__(self, db_manager: VectorDatabaseManager):
        """Store the connection manager used by all query methods.

        Args:
            db_manager: Manager exposing an async ``get_connection()``
                context manager yielding an asyncpg-style connection.
        """
        logger.debug("initialize QueryProcessorService")
        self.db_manager = db_manager

    async def create_table(self):
        """Create the query-related table and its indexes (idempotent)."""
        async with self.db_manager.get_connection() as conn:
            # Query-to-chunk relation table (supports enhanced query analytics):
            # records which chunk was returned for which historical query.
            await conn.execute('''
                CREATE TABLE IF NOT EXISTS query_document_relations (
                    id BIGSERIAL PRIMARY KEY,
                    query_id BIGINT NOT NULL REFERENCES query_history(id) ON DELETE CASCADE,
                    chunk_id BIGINT NOT NULL REFERENCES chunks(id) ON DELETE CASCADE,
                    similarity FLOAT NOT NULL,
                    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                    
                    CONSTRAINT unique_query_chunk UNIQUE(query_id, chunk_id)
                );
            ''')
            logger.info("query_document_relations table created/checked")

            # Indexes covering both lookup directions (by query and by chunk).
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_query_chunk_relations_query_id 
                ON query_document_relations(query_id);
            ''')
            await conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_query_chunk_relations_chunk_id 
                ON query_document_relations(chunk_id);
            ''')
            logger.info("query_chunk_relations indexes created/checked")

    async def similarity_search(
        self,
        query_embedding: np.ndarray,
        top_k: Optional[int] = 5,
        document_ids: Optional[List[int]] = None,
        similarity_threshold: Optional[float] = 0.001
    ) -> List[Dict[str, Any]]:
        """Run a vector similarity search over chunk embeddings.

        Uses the pgvector ``<=>`` distance operator; similarity is reported
        as ``1 - distance``.

        Args:
            query_embedding: Query vector to compare against ``chunks.embedding``.
            top_k: Maximum number of results to return.
            document_ids: Optional list of document ids to restrict the search to.
            similarity_threshold: Minimum (exclusive) similarity a chunk must reach.

        Returns:
            List of chunk dicts ordered by ascending distance, each with
            ``id``, ``content``, ``metadata``, ``document_title``,
            ``external_id``, ``distance`` and ``similarity`` keys.

        Raises:
            Exception: Re-raised (with original traceback) on query failure.
        """
        async with self.db_manager.get_connection() as conn:
            base_query = '''
                SELECT 
                    c.id,
                    c.content,
                    c.metadata,
                    d.title as document_title,
                    d.external_id,
                    c.embedding <=> $1 as distance,
                    1 - (c.embedding <=> $1) as similarity
                FROM chunks c
                JOIN documents d ON c.document_id = d.id
            '''

            # Build the WHERE clause using positional parameters only.
            # FIX: the previous version interpolated threshold/limit via
            # f-strings (injection-prone) and emitted a bare "AND" with no
            # preceding "WHERE" when document_ids was absent, producing
            # syntactically invalid SQL on the unfiltered path.
            params: List[Any] = [query_embedding]
            where_conditions: List[str] = []

            if document_ids:
                params.append(document_ids)
                where_conditions.append(f"d.id = ANY(${len(params)})")

            # Similarity threshold is always applied, so WHERE is never empty.
            params.append(similarity_threshold)
            where_conditions.append(f"(1 - (c.embedding <=> $1)) > ${len(params)}")

            base_query += " WHERE " + " AND ".join(where_conditions)

            params.append(top_k)
            base_query += f'''
                ORDER BY c.embedding <=> $1
                LIMIT ${len(params)}
            '''

            logger.debug(f"similarity_search query: {base_query} with params: {params}")
            try:
                records = await conn.fetch(base_query, *params)
                logger.info(f"similarity search record number: {len(records)}.")
            except Exception as e:
                logger.error(f"similarity_search query error: {e}")
                # Bare raise preserves the original traceback.
                raise
            return [
                {
                    'id': record['id'],
                    'content': record['content'],
                    'metadata': record['metadata'],
                    'document_title': record['document_title'],
                    'external_id': str(record['external_id']),
                    'distance': float(record['distance']),
                    'similarity': float(record['similarity'])
                }
                for record in records
            ]

    async def hybrid_search(
        self,
        query_text: str,
        query_embedding: np.ndarray,
        top_k: Optional[int] = 5,
        keyword_weight: float = 0.3,
        vector_weight: float = 0.7
    ) -> List[Dict[str, Any]]:
        """Run a hybrid search combining vector and keyword relevance.

        Both legs over-fetch ``top_k * 2`` candidates, then scores are
        blended as ``similarity * vector_weight + keyword_rank *
        keyword_weight`` and the top ``top_k`` results are returned.

        Args:
            query_text: Query text for PostgreSQL full-text search.
            query_embedding: Query vector for similarity search.
            top_k: Maximum number of results to return.
            keyword_weight: Weight applied to the full-text ``ts_rank`` score.
            vector_weight: Weight applied to the vector similarity score.

        Returns:
            Chunk dicts sorted by descending ``final_score``.
        """
        # FIX: the annotation permits None, which previously crashed on
        # ``top_k * 2``; fall back to the documented default instead.
        if top_k is None:
            top_k = 5

        # Vector leg: over-fetch so the merge step has candidates to re-rank.
        vector_results = await self.similarity_search(query_embedding, top_k=top_k * 2)

        # Keyword leg: PostgreSQL full-text search (English configuration).
        async with self.db_manager.get_connection() as conn:
            keyword_query = '''
                SELECT 
                    dc.id,
                    dc.content,
                    dc.metadata,
                    d.title as document_title,
                    d.external_id,
                    ts_rank(to_tsvector('english', dc.content), plainto_tsquery('english', $1)) as rank
                FROM chunks dc
                JOIN documents d ON dc.document_id = d.id
                WHERE to_tsvector('english', dc.content) @@ plainto_tsquery('english', $1)
                ORDER BY rank DESC
                LIMIT $2
            '''

            keyword_records = await conn.fetch(keyword_query, query_text, top_k * 2)

            keyword_results = [
                {
                    'id': record['id'],
                    'content': record['content'],
                    'metadata': record['metadata'],
                    'document_title': record['document_title'],
                    'external_id': str(record['external_id']),
                    'keyword_rank': float(record['rank'])
                }
                for record in keyword_records
            ]

        # Merge the two result sets, keyed by chunk id.
        result_map: Dict[Any, Dict[str, Any]] = {}

        # Seed with vector results (weighted similarity as the base score).
        for result in vector_results:
            result_map[result['id']] = {
                **result,
                'final_score': result['similarity'] * vector_weight
            }

        # Blend in keyword results; chunks found by both legs get both scores.
        for result in keyword_results:
            result_id = result['id']
            if result_id in result_map:
                result_map[result_id]['keyword_rank'] = result['keyword_rank']
                result_map[result_id]['final_score'] += result['keyword_rank'] * keyword_weight
            else:
                # Keyword-only hit: no vector match, so similarity floors at 0.
                result_map[result_id] = {
                    **result,
                    'similarity': 0.0,
                    'distance': 1.0,
                    'final_score': result['keyword_rank'] * keyword_weight
                }

        # Rank by blended score and truncate to the requested size.
        final_results = sorted(result_map.values(), key=lambda x: x['final_score'], reverse=True)
        return final_results[:top_k]

    async def get_related_documents(
        self,
        document_id: int,
        top_k: int = 5
    ) -> List[Dict[str, Any]]:
        """Find documents related to *document_id* via shared query history.

        Two documents are considered related when their chunks were returned
        for the same historical queries (via ``query_document_relations``);
        results are ranked by the number of shared queries.

        Args:
            document_id: Id of the reference document (excluded from results).
            top_k: Maximum number of related documents to return.

        Returns:
            List of document dicts with ``id``, ``external_id``, ``title``,
            ``content_type`` and ``common_query_count`` keys.
        """
        async with self.db_manager.get_connection() as conn:
            # Inner subquery: all queries that ever retrieved a chunk of the
            # reference document. Outer query: other documents whose chunks
            # were retrieved by those same queries, ranked by overlap size.
            query = '''
                SELECT 
                    d.id,
                    d.external_id,
                    d.title,
                    d.content_type,
                    COUNT(DISTINCT qdr.query_id) as common_query_count
                FROM documents d
                JOIN chunks dc ON d.id = dc.document_id
                JOIN query_document_relations qdr ON dc.id = qdr.chunk_id
                WHERE qdr.query_id IN (
                    SELECT qdr2.query_id 
                    FROM query_document_relations qdr2
                    JOIN chunks dc2 ON qdr2.chunk_id = dc2.id
                    WHERE dc2.document_id = $1
                )
                AND d.id != $1
                GROUP BY d.id
                ORDER BY common_query_count DESC
                LIMIT $2
            '''

            records = await conn.fetch(query, document_id, top_k)

            return [
                {
                    'id': record['id'],
                    'external_id': record['external_id'],
                    'title': record['title'],
                    'content_type': record['content_type'],
                    'common_query_count': record['common_query_count']
                }
                for record in records
            ]

    async def analyze_query_performance(self) -> Dict[str, Any]:
        """Summarize query performance and retrieval-quality statistics.

        Returns:
            Dict with aggregate response-time stats from ``query_history``
            and similarity stats from ``query_document_relations``; averages
            default to 0 when the tables are empty (SQL AVG returns NULL).
        """
        async with self.db_manager.get_connection() as conn:
            # Basic query performance aggregates.
            performance_stats = await conn.fetchrow('''
                SELECT 
                    COUNT(*) as total_queries,
                    AVG(response_time_ms) as avg_response_time,
                    MAX(response_time_ms) as max_response_time,
                    MIN(response_time_ms) as min_response_time,
                    AVG(context_count) as avg_context_count
                FROM query_history
            ''')

            # Retrieval quality aggregates.
            quality_stats = await conn.fetchrow('''
                SELECT 
                    AVG(similarity) as avg_similarity,
                    MIN(similarity) as min_similarity,
                    MAX(similarity) as max_similarity,
                    COUNT(*) as total_relations
                FROM query_document_relations
            ''')

            return {
                'total_queries': performance_stats['total_queries'],
                'avg_response_time_ms': float(performance_stats['avg_response_time']) if performance_stats['avg_response_time'] else 0,
                'max_response_time_ms': performance_stats['max_response_time'],
                'min_response_time_ms': performance_stats['min_response_time'],
                'avg_context_count': float(performance_stats['avg_context_count']) if performance_stats['avg_context_count'] else 0,
                'avg_similarity': float(quality_stats['avg_similarity']) if quality_stats['avg_similarity'] else 0,
                'min_similarity': float(quality_stats['min_similarity']) if quality_stats['min_similarity'] else 0,
                'max_similarity': float(quality_stats['max_similarity']) if quality_stats['max_similarity'] else 0,
                'total_relations': quality_stats['total_relations']
            }