#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging
from typing import Dict, List, Any, Optional, Set

from vector_store.vector_store_interface import VectorStoreInterface
from vector_store.factory import VectorStoreFactory
from vector_store.node_vector_processor import NodeVectorProcessor
from graph.graph_connection import GraphConnection, NebulaGraphConnection
from parser.utils import ContentCompressor

# Module-level logger
logger = logging.getLogger(__name__)

class ContextRetriever:
    """
    Context retriever: finds code context relevant to a query text by
    combining vector-similarity search (class / function / annotation nodes)
    with one-hop neighborhood lookups in a NebulaGraph database.
    """

    def __init__(self, 
                 vector_store: VectorStoreInterface,
                 graph_connection: GraphConnection):
        """
        Initialize the context retriever.

        Args:
            vector_store: Vector store interface used for similarity search.
            graph_connection: Graph database connection (NebulaGraph).
        """
        self.vector_store = vector_store
        self.graph_connection = graph_connection
        self.node_processor = NodeVectorProcessor(vector_store=vector_store)
        self.content_compressor = ContentCompressor()
    
    def retrieve_context(self, query_text: str, top_k: int = 3) -> Dict[str, Any]:
        """
        Retrieve context relevant to the query text.

        Args:
            query_text: The query text.
            top_k: Maximum number of results returned per node type.

        Returns:
            Dict with keys "query" (the query text), "nodes" (node id -> node
            data) and "total_nodes" (count of collected nodes).
        """
        logger.info(f"开始检索查询文本的相关上下文: {query_text}")
        
        # All retrieved nodes, keyed by node id.
        all_nodes: Dict[str, Any] = {}
        # Ids already handled, to avoid processing duplicates.
        processed_ids: Set[str] = set()
        
        # 1-3. Search the vector store for similar nodes of each supported
        #      type. (Previously three copy-pasted calls; now a single loop —
        #      result order and log output are unchanged.)
        similar_nodes: List[Dict[str, Any]] = []
        for node_type in ("class", "function", "annotation"):
            nodes = self.node_processor.search_similar_nodes(
                query_text=query_text,
                node_type=node_type,
                top_k=top_k
            )
            logger.debug(f"检索到 {len(nodes)} 个相似{node_type}节点")
            similar_nodes.extend(nodes)
        
        # 4. Process every vector hit plus its one-hop graph neighborhood.
        for node in similar_nodes:
            node_id = node.get("vid")
            if node_id and node_id not in processed_ids:
                # Fetch the full node record from the graph database.
                graph_node = self._get_graph_node_by_id(node_id)
                if graph_node:
                    # Process the hit itself.
                    self._process_node(graph_node, all_nodes, processed_ids)
                    
                    # Pull in directly related nodes as additional context.
                    for related_node in self._get_related_nodes(node_id):
                        related_id = related_node.get("id")
                        if related_id and related_id not in processed_ids:
                            self._process_node(related_node, all_nodes, processed_ids)
        
        # TODO(魏燕芬): all_nodes needs truncation.
        # Next week: 32k context window maps to a 16k budget; truncate at 16k.
        # This month:
        # 1. rank results per single answer
        # 2. sort by rank

        # 5. Build the result payload.
        result = {
            "query": query_text,
            "nodes": all_nodes,
            "total_nodes": len(all_nodes)
        }
        
        logger.info(f"检索完成，共找到 {len(all_nodes)} 个相关节点")
        return result
    
    def _process_node(self, node: Dict[str, Any], all_nodes: Dict[str, Any], processed_ids: Set[str]) -> None:
        """
        Process a single node: decompress its content and record it.

        Args:
            node: Node data; identified by "vid" (vector hit) or "id"
                (graph query row).
            all_nodes: Accumulator mapping node id -> node data (mutated).
            processed_ids: Set of ids already handled (mutated).
        """
        node_id = node.get("vid") or node.get("id")
        if not node_id:
            return
        
        # Mark as processed up front so a decompression failure cannot
        # cause the node to be retried.
        processed_ids.add(node_id)
        
        # Best-effort decompression of the stored content; failures fall
        # back to a placeholder string instead of aborting retrieval.
        if "content" in node and node["content"]:
            try:
                node["decoded_content"] = self.content_compressor.decompress(node["content"])
            except Exception as e:
                logger.warning(f"解压节点内容失败: {str(e)}")
                node["decoded_content"] = "无法解压内容"
        
        # Record the node in the result accumulator.
        all_nodes[node_id] = node
    
    def _get_related_nodes(self, node_id: str) -> List[Dict[str, Any]]:
        """
        Fetch nodes one hop away from the given node in the graph.

        Args:
            node_id: Graph vertex id.

        Returns:
            List of related-node dicts (empty on error or no matches).
        """
        related_nodes: List[Dict[str, Any]] = []
        
        try:
            space_name = self.graph_connection.config.get('space', 'projectx')
            
            # Query each neighbor tag separately.
            node_types = ["class", "function", "comment"]
            
            # TODO(魏燕芬): remove the LIMIT.
            for node_type in node_types:
                # NOTE(review): node_id is interpolated directly into nGQL —
                # safe only while ids are trusted; consider parameterized
                # queries if ids can come from user input.
                query = f"""
                USE {space_name};
                MATCH (n)-[r]-(m:{node_type})
                WHERE id(n) == "{node_id}"
                RETURN id(m) AS id, m.{node_type}.name AS name, m.{node_type}.full_name AS full_name, 
                       "{node_type}" AS type, m.{node_type}.content AS content, 
                       m.{node_type}.visibility AS visibility,
                       type(r) AS relation_type
                LIMIT 3
                """
                
                success, result = self.graph_connection.execute_query(query)
                
                if success and hasattr(result, 'rows'):
                    for row in result.rows():
                        # Skip rows whose full_name or content is missing
                        # (a NULL thrift Value appears to read back as 0
                        # here — TODO confirm against the Nebula client).
                        if row.values[2].value == 0 or row.values[4].value == 0:
                            continue
                        # Convert the raw row into a plain dict.
                        related_nodes.append({
                            "id": row.values[0].value.decode('utf-8'),
                            "name": row.values[1].value.decode('utf-8'),
                            "full_name": row.values[2].value.decode('utf-8'),
                            "type": row.values[3].value.decode('utf-8'),
                            "content": row.values[4].value.decode('utf-8'),
                            "visibility": row.values[5].value.decode('utf-8'),
                            "relation_type": row.values[6].value.decode('utf-8')
                            # TODO(魏燕芬): "rank":
                        })
                else:
                    logger.debug(f"查询{node_type}类型相关节点: {result if not success else '无结果'}")
        
        except Exception as e:
            logger.error(f"获取相关节点时发生错误: {str(e)}")
        
        return related_nodes

    def _get_graph_node_by_id(self, node_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch a complete node record from the graph database by vertex id.

        Args:
            node_id: Graph vertex id.

        Returns:
            Node data dict, or None when the node cannot be found, its tag
            cannot be determined, or required properties are missing.
        """
        try:
            space_name = self.graph_connection.config.get('space', 'projectx')
            
            # First determine the node's tag (label) so we know which
            # property namespace to read from in the second query.
            type_query = f"""
            USE {space_name};
            MATCH (n) 
            WHERE id(n) == "{node_id}"
            RETURN labels(n) AS node_type
            """
            
            success, type_result = self.graph_connection.execute_query(type_query)
            
            if not success or not hasattr(type_result, 'rows') or len(type_result.rows()) == 0:
                logger.warning(f"无法确定节点 {node_id} 的类型")
                return None
            
            # First label of the first row. A NULL thrift Value appears to
            # read back as 0 (TODO confirm), hence the != 0 guard before
            # decoding the bytes.
            node_type = type_result.rows()[0].values[0].value.values[0].value
            if node_type != 0:
                node_type = node_type.decode('utf-8')

            # Fetch the properties we need under that tag.
            query = f"""
            USE {space_name};
            MATCH (n) 
            WHERE id(n) == "{node_id}"
             RETURN id(n) AS id, 
                   n.{node_type}.name AS name, 
                   n.{node_type}.full_name AS full_name, 
                   "{node_type}" AS type, 
                   n.{node_type}.content AS content, 
                   n.{node_type}.visibility AS visibility
            """
            
            success, result = self.graph_connection.execute_query(query)
            
            if success and hasattr(result, 'rows') and len(result.rows()) > 0:
                row = result.rows()[0]

                # Treat nodes without full_name or content as unusable
                # (same NULL-as-0 sentinel as above).
                if row.values[2].value == 0 or row.values[4].value == 0:
                    return None
                
                # Convert the raw row into a plain dict.
                node_data = {
                    "vid": row.values[0].value.decode('utf-8'),   # id
                    "name": row.values[1].value.decode('utf-8'),  # name
                    "full_name": row.values[2].value.decode('utf-8'),  # full_name
                    "type": row.values[3].value.decode('utf-8'),  # type
                    "content": row.values[4].value.decode('utf-8'),  # content
                    "visibility": row.values[5].value.decode('utf-8')  # visibility
                }
                
                return node_data
            else:
                logger.warning(f"在图数据库中未找到节点 {node_id}")
                return None
                
        except Exception as e:
            logger.error(f"获取图数据库节点时发生错误: {str(e)}")
            return None
