from typing import List, Dict, Any
import torch
import numpy as np
from sentence_transformers import SentenceTransformer
from .base import BaseGraphEmbedder, Node, SubGraph


class GraphEmbedder(BaseGraphEmbedder):
    """Graph embedder backed by a SentenceTransformer text-encoding model.

    Produces dense vector embeddings for raw text, individual graph nodes
    (via a textual serialization of type + scalar properties), and whole
    subgraphs (via an attention-weighted average of node embeddings).
    """

    def __init__(self):
        # Model is loaded lazily in initialize(); all embed_* methods
        # raise RuntimeError until then.
        self.model = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def initialize(self, config: Dict[str, Any]) -> None:
        """Load the embedding model.

        Args:
            config: May contain "model_name"; defaults to
                "BAAI/bge-large-zh-v1.5".
        """
        model_name = config.get("model_name", "BAAI/bge-large-zh-v1.5")
        self.model = SentenceTransformer(model_name)
        self.model.to(self.device)

    def embed_text(self, text: str) -> List[float]:
        """Embed a raw text string.

        Args:
            text: Input text.

        Returns:
            The embedding as a plain Python list of floats.

        Raises:
            RuntimeError: If initialize() has not been called.
        """
        if not self.model:
            raise RuntimeError("模型未初始化")

        embedding = self.model.encode(text, convert_to_numpy=True)
        return embedding.tolist()

    def embed_node(self, node: Node) -> List[float]:
        """Embed a single graph node.

        If the node already carries an embedding it is returned as-is;
        otherwise the node is serialized as "类型: <type> | key: value | ..."
        (only scalar-valued properties are included) and encoded.

        Args:
            node: The node to embed.

        Returns:
            The node embedding as a list of floats.

        Raises:
            RuntimeError: If initialize() has not been called.
        """
        if not self.model:
            raise RuntimeError("模型未初始化")

        # Reuse a precomputed embedding when available.
        if node.embedding is not None:
            return node.embedding

        # Build a textual representation: node type first, then scalar properties.
        text_parts = [f"类型: {node.type}"]
        for key, value in node.properties.items():
            # Skip non-scalar values (lists, dicts, None) — they don't
            # serialize meaningfully into a flat text prompt.
            if isinstance(value, (str, int, float, bool)):
                text_parts.append(f"{key}: {value}")

        text = " | ".join(text_parts)
        embedding = self.model.encode(text, convert_to_numpy=True)
        return embedding.tolist()

    def embed_subgraph(self, subgraph: SubGraph) -> List[float]:
        """Embed a subgraph as an attention-weighted mean of its node embeddings.

        Edges are not used; only node content contributes. An empty subgraph
        maps to the zero vector.

        Args:
            subgraph: The subgraph to embed.

        Returns:
            The subgraph embedding as a list of floats.

        Raises:
            RuntimeError: If initialize() has not been called.
        """
        if not self.model:
            raise RuntimeError("模型未初始化")

        node_embeddings = [self.embed_node(node) for node in subgraph.nodes]

        if not node_embeddings:
            # Zero vector of the model's output dimensionality for empty graphs.
            return np.zeros(self.model.get_sentence_embedding_dimension()).tolist()

        node_embeddings = np.array(node_embeddings)
        attention_weights = self._compute_attention_weights(node_embeddings)

        # Weighted average pools the node embeddings into one vector.
        subgraph_embedding = np.average(node_embeddings, axis=0, weights=attention_weights)
        return subgraph_embedding.tolist()

    def _compute_attention_weights(self, embeddings: np.ndarray) -> np.ndarray:
        """Compute per-node pooling weights via dot-product self-attention.

        Args:
            embeddings: Array of shape (n_nodes, dim).

        Returns:
            1-D array of n_nodes weights (rows of the softmax-ed score
            matrix averaged over the query axis; sums to 1).
        """
        # Pairwise dot-product attention scores, shape (n, n).
        attention_scores = np.matmul(embeddings, embeddings.T)

        # Numerically stable softmax: subtract the per-row max before exp,
        # otherwise large dot products (common for high-dim embeddings)
        # overflow np.exp to inf and the weights become NaN. Subtracting a
        # row-wise constant leaves the softmax result unchanged.
        shifted = attention_scores - attention_scores.max(axis=1, keepdims=True)
        attention_weights = np.exp(shifted)
        attention_weights = attention_weights / attention_weights.sum(axis=1, keepdims=True)

        # Final weight of each node = its mean attention received across queries.
        node_weights = attention_weights.mean(axis=0)
        return node_weights