from typing import List, Dict, Any, Optional
import numpy as np
from .base import (
    BaseGraphSearcher,
    BaseGraphStore,
    BaseGraphEmbedder,
    GraphSearchResult,
    Node,
    SubGraph
)


class GraphSearcher(BaseGraphSearcher):
    """Graph search implementation.

    Finds candidate nodes by semantic similarity to a text query, expands a
    subgraph around each candidate, and ranks subgraphs by a weighted blend of
    embedding relevance and structural quality.
    """

    def __init__(self):
        # Components are wired up lazily in initialize(); search() guards
        # against use before initialization.
        self.store = None      # graph store component (BaseGraphStore)
        self.embedder = None   # embedding component (BaseGraphEmbedder)
        self.config = None     # full config dict passed to initialize()

    def initialize(self, config: Dict[str, Any]) -> None:
        """Initialize the searcher and its storage/embedding components.

        Args:
            config: Must contain "store" and "embedder" sections, each holding
                a "class" entry with the component class to instantiate. Each
                section is forwarded to that component's own initialize().
                May also contain "relevance_weight" (float in [0, 1], default
                0.7) used when scoring subgraphs.
        """
        self.config = config

        # Instantiate the storage and embedding components from the classes
        # named in the config, then hand each its own config section.
        store_class = config["store"]["class"]
        embedder_class = config["embedder"]["class"]

        self.store = store_class()
        self.embedder = embedder_class()

        self.store.initialize(config["store"])
        self.embedder.initialize(config["embedder"])

    async def search(self, query: str, **kwargs) -> List[GraphSearchResult]:
        """Execute a graph search for *query*.

        Keyword Args:
            node_types: Optional list of node type names to restrict the
                candidate search to.
            max_results: Maximum number of results to return (default 5).
            subgraph_depth: Expansion depth around each candidate node
                (default 1).

        Returns:
            Subgraph results sorted by descending relevance score, truncated
            to *max_results*.

        Raises:
            RuntimeError: If initialize() has not been called.
        """
        if not self.store or not self.embedder:
            raise RuntimeError("搜索器未初始化")

        # Extract search parameters.
        node_types = kwargs.get("node_types")
        max_results = kwargs.get("max_results", 5)
        subgraph_depth = kwargs.get("subgraph_depth", 1)

        # Step 1: find candidate nodes by semantic similarity. The query
        # embedding is computed once here and reused both for candidate
        # ranking and for subgraph scoring (previously it was embedded twice).
        query_embedding = self.embedder.embed_text(query)
        candidate_nodes = self._semantic_search(
            query, node_types, query_embedding=query_embedding
        )

        # Step 2: expand and score a subgraph around each candidate.
        search_results = []
        for node in candidate_nodes:
            # Subgraph centered on the current candidate node.
            subgraph = self.store.get_subgraph([node.id], depth=subgraph_depth)

            # Combined semantic + structural relevance of this subgraph.
            score = self._compute_relevance_score(query_embedding, subgraph)

            result = GraphSearchResult(
                subgraph=subgraph,
                score=score,
                source="graph_search",
                metadata={
                    "query": query,
                    "node_types": node_types,
                    "depth": subgraph_depth
                }
            )
            search_results.append(result)

        # Sort by score (best first) and cap the result count.
        search_results.sort(key=lambda x: x.score, reverse=True)
        return search_results[:max_results]

    def _semantic_search(
        self,
        query: str,
        node_types: Optional[List[str]] = None,
        query_embedding: Optional[List[float]] = None,
    ) -> List[Node]:
        """Search nodes semantically, ranked by cosine similarity to *query*.

        Args:
            query: Query text; also used for the store's text search.
            node_types: Optional node-type filter forwarded to the store.
            query_embedding: Precomputed query embedding; computed here if
                not supplied (backward-compatible optional parameter).

        Returns:
            Nodes matching the text search, ordered by descending similarity.
        """
        # Text search first to obtain a candidate pool.
        text_nodes = self.store.search_nodes(query, node_types)

        if not text_nodes:
            return []

        # Reuse the caller-provided embedding when available to avoid
        # embedding the same query twice.
        if query_embedding is None:
            query_embedding = self.embedder.embed_text(query)
        query_vec = np.array(query_embedding)

        # Score every candidate node by cosine similarity.
        scored_nodes = []
        for node in text_nodes:
            node_vec = np.array(self.embedder.embed_node(node))
            similarity = self._cosine_similarity(query_vec, node_vec)
            scored_nodes.append((node, similarity))

        # Highest similarity first.
        scored_nodes.sort(key=lambda x: x[1], reverse=True)

        return [node for node, _ in scored_nodes]

    def _compute_relevance_score(self, query_embedding: List[float], subgraph: SubGraph) -> float:
        """Score a subgraph against the query.

        The score is a convex combination of embedding similarity and a
        structural score, weighted by config key "relevance_weight"
        (default 0.7).
        """
        # Embedding similarity between query and whole-subgraph embedding.
        subgraph_embedding = np.array(self.embedder.embed_subgraph(subgraph))
        query_vec = np.array(query_embedding)
        relevance_score = self._cosine_similarity(query_vec, subgraph_embedding)

        # Structural quality of the subgraph itself.
        structure_score = self._compute_structure_score(subgraph)

        # Blend: alpha weights semantic relevance, (1 - alpha) structure.
        alpha = self.config.get("relevance_weight", 0.7)
        final_score = alpha * relevance_score + (1 - alpha) * structure_score

        return float(final_score)

    def _compute_structure_score(self, subgraph: SubGraph) -> float:
        """Compute a structural score in terms of connectivity and diversity.

        Returns 0.0 for an empty subgraph.
        """
        if not subgraph.nodes:
            return 0.0

        edge_count = len(subgraph.edges)
        node_count = len(subgraph.nodes)

        # Connectivity: edge density relative to a complete undirected graph.
        connectivity = edge_count / (node_count * (node_count - 1) / 2) if node_count > 1 else 0

        # Diversity: distinct node/edge types relative to element count.
        # The denominator is always positive here (node_count >= 1 after the
        # early return), so no edge_count guard is needed. The previous
        # `if edge_count > 0` guard wrongly zeroed out node-type diversity
        # for subgraphs that have nodes but no edges.
        node_types = {node.type for node in subgraph.nodes}
        edge_types = {edge.type for edge in subgraph.edges}
        diversity = (len(node_types) + len(edge_types)) / (node_count + edge_count)

        # Fixed blend of the two structural signals.
        structure_score = 0.6 * connectivity + 0.4 * diversity
        return float(structure_score)

    @staticmethod
    def _cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
        """Cosine similarity of two vectors; 0.0 if either has zero norm."""
        norm_a = np.linalg.norm(a)
        norm_b = np.linalg.norm(b)

        # Guard against division by zero for degenerate (all-zero) vectors.
        if norm_a == 0 or norm_b == 0:
            return 0.0

        return float(np.dot(a, b) / (norm_a * norm_b))