import logging
import os
import json
import pickle
from datetime import datetime
from typing import Dict, List

from neo4j import GraphDatabase
from neo4j.exceptions import Neo4jError
from collections import defaultdict
from tqdm.auto import tqdm

# Configure module-wide logging: timestamped records at INFO level.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)


class NeoGraph:
    """
    Base class for Neo4j graph database operations.

    Nodes and relationships are first staged in memory (``self.nodes`` /
    ``self.relationships``), then flushed to Neo4j in batches via
    :meth:`build_neo4j_graph`. The staged structure can also be saved to
    and restored from a pickle cache on disk.
    """

    def __init__(self, uri, user, password, cache_dir=None, batch_size=2000, active_graph_id: str = None):
        """
        Initialize the Neo4j driver and in-memory staging structures.

        Args:
            uri: Neo4j database URI.
            user: Neo4j username.
            password: Neo4j password.
            cache_dir: Cache directory; pickle caching is enabled when set.
            batch_size: Number of nodes/relationships written per batch.
            active_graph_id: Default graph ID this instance queries against.
        """
        self.driver = GraphDatabase.driver(uri, auth=(user, password))
        self.batch_size = batch_size
        self.cache_dir = cache_dir
        self.active_graph_id = active_graph_id

        # Core staging structures consumed by build_neo4j_graph().
        self.nodes = {}  # {entity_id: {"labels": [...], "properties": {...}}}
        self.relationships = []  # [(src_id, dst_id, type, properties), ...]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the driver; exceptions (if any) propagate to the caller.
        self.close()

    def close(self):
        """Close the database connection. Safe to call more than once."""
        if self.driver:
            self.driver.close()
            # Drop the reference so a second close() (e.g. manual close
            # followed by context-manager exit) is a no-op.
            self.driver = None
            logger.info("Neo4j连接已关闭")

    @staticmethod
    def _quote_identifier(name):
        """
        Backtick-quote a label or relationship type for Cypher interpolation.

        Labels and relationship types cannot be passed as query parameters,
        so they must be spliced into the query text. Quoting with backticks
        (doubling any embedded backtick, per Neo4j's escaping rules) keeps
        malformed or untrusted names from breaking out of the identifier
        position (Cypher injection).
        """
        return "`" + str(name).replace("`", "``") + "`"

    def save_graph_to_cache(self, graph_id):
        """
        Save the graph's core structure (nodes and relationships) to a
        cached pickle file.

        Note: this base-class implementation persists only ``self.nodes``
        and ``self.relationships``. Subclasses such as
        SystemDependencyGraphBuilder should override it to save richer data.

        Args:
            graph_id: Graph ID.

        Returns:
            bool: True on success, False otherwise.
        """
        if not self.cache_dir:
            logger.warning("Cache directory not set. Skipping save_graph_to_cache.")
            return False

        try:
            # Make sure the cache directory exists.
            os.makedirs(self.cache_dir, exist_ok=True)

            cache_file_path = os.path.join(self.cache_dir, f"graph_data_{graph_id}.pkl")

            # Bundle nodes/relationships with provenance metadata.
            graph_data = {
                "nodes": self.nodes,
                "relationships": self.relationships,
                "saved_at": datetime.now().isoformat(),
                "graph_id": graph_id
            }

            with open(cache_file_path, "wb") as f:
                pickle.dump(graph_data, f)

            logger.info(f"图结构核心数据已保存到缓存: {cache_file_path}")
            return True

        # Best-effort persistence: any failure is logged and reported via
        # the return value rather than propagated.
        except Exception as e:
            logger.error(f"保存图结构核心数据到缓存时发生错误: {e}")
            return False

    def load_graph_from_cache(self, graph_id):
        """
        Load the graph's core structure (nodes and relationships) from a
        cached pickle file.

        Note: this base-class implementation restores only ``self.nodes``
        and ``self.relationships``. Subclasses such as
        SystemDependencyGraphBuilder should override it to load richer data.

        Args:
            graph_id: Graph ID.

        Returns:
            bool: True on success, False otherwise.
        """
        if not self.cache_dir:
            logger.warning("Cache directory not set. Skipping load_graph_from_cache.")
            return False

        try:
            cache_file_path = os.path.join(self.cache_dir, f"graph_data_{graph_id}.pkl")

            if not os.path.exists(cache_file_path):
                logger.info(f"缓存文件不存在: {cache_file_path}")
                return False

            # SECURITY: pickle.load executes arbitrary code from the file.
            # Only load cache files this process itself wrote; never point
            # cache_dir at untrusted data.
            with open(cache_file_path, "rb") as f:
                graph_data = pickle.load(f)

            # Replace the in-memory staging structures wholesale.
            self.nodes = graph_data.get("nodes", {})
            self.relationships = graph_data.get("relationships", [])

            loaded_graph_id = graph_data.get("graph_id")
            saved_at = graph_data.get("saved_at")
            logger.info(f"成功从缓存文件 {cache_file_path} 加载图结构核心数据 (graph_id: {loaded_graph_id}, saved_at: {saved_at})")
            return True

        # Best-effort load: corrupt/incompatible cache degrades to a rebuild.
        except Exception as e:
            logger.error(f"从缓存加载图结构核心数据时发生错误: {e}")
            return False

    def _add_node(self, node_id, labels, properties):
        """
        Stage a node in memory.

        Skips missing/NaN IDs and empty label lists (a label-less node would
        crash _batch_create_nodes, which uses labels[0]). First write wins:
        re-adding an existing ID is a no-op.
        """
        if node_id is None or str(node_id).lower() == 'nan':
            return
        if not labels:
            logger.warning("Skipping node %s: empty label list", node_id)
            return

        node_id = str(node_id)  # normalize IDs to strings
        if node_id not in self.nodes:
            self.nodes[node_id] = {"labels": labels, "properties": properties}

    def _add_relationship(self, src_id, dst_id, rel_type, properties):
        """
        Stage a relationship in memory.

        Skips edges whose endpoints are missing/NaN. Endpoint IDs are
        normalized to strings to match _add_node.
        """
        if src_id is None or dst_id is None or str(src_id).lower() == 'nan' or str(dst_id).lower() == 'nan':
            return

        src_id = str(src_id)
        dst_id = str(dst_id)
        self.relationships.append((src_id, dst_id, rel_type, properties))

    def _clear_existing_graph(self, session, graph_id):
        """
        Delete all nodes and relationships already stored under graph_id.

        Args:
            session: Active Neo4j session.
            graph_id: Graph ID to purge.
        """
        logger.info(f"开始清除图中已存在的graph_id={graph_id}的节点和边...")

        try:
            # Delete tagged relationships first: this also catches edges of
            # this graph that touch nodes belonging to other graph_ids,
            # which DETACH DELETE below would not reach.
            query_delete_relationships = """
            MATCH ()-[r {graph_id: $graph_id}]->()
            DELETE r
            """
            result = session.run(query_delete_relationships, {"graph_id": graph_id})
            rel_count = result.consume().counters.relationships_deleted
            logger.info(f"删除了 {rel_count} 个关系")

            # Then delete the nodes; every node of this graph carries a
            # graph_id property.
            query_delete_nodes = """
            MATCH (n {graph_id: $graph_id})
            DETACH DELETE n
            """
            result = session.run(query_delete_nodes, {"graph_id": graph_id})
            node_count = result.consume().counters.nodes_deleted
            logger.info(f"删除了 {node_count} 个节点")

        except Neo4jError as e:
            logger.error(f"删除已有节点和边时发生错误: {e}")

    def _create_indices(self, session):
        """Create required indexes. Hook for subclasses; no-op here."""
        pass

    def _batch_create_nodes(self, session, graph_id):
        """Write staged nodes to Neo4j in batches, grouped by label."""
        logger.info("开始批量创建节点...")

        # Group nodes by their first label (labels are guaranteed non-empty
        # by _add_node).
        nodes_by_label = defaultdict(list)
        for node_id, node_data in self.nodes.items():
            label = node_data["labels"][0]
            nodes_by_label[label].append({
                "id": node_id,
                "properties": node_data["properties"]
            })

        # MERGE each label group in batches of self.batch_size.
        for label, nodes in nodes_by_label.items():
            # Labels cannot be parameterized; interpolate a quoted form.
            safe_label = self._quote_identifier(label)
            for i in range(0, len(nodes), self.batch_size):
                batch = nodes[i:i + self.batch_size]
                query = f"""
                UNWIND $nodes AS node
                MERGE (n:{safe_label} {{id: node.id, graph_id: $graph_id}})
                SET n += node.properties
                """
                try:
                    session.run(query, {"nodes": batch, "graph_id": graph_id})
                except Neo4jError as e:
                    logger.error(f"批量创建{label}节点时发生错误: {e}")

        logger.info(f"节点创建完成，共 {len(self.nodes)} 个节点")

    def _batch_create_relationships(self, session, graph_id):
        """Write staged relationships to Neo4j in batches, grouped by type."""
        logger.info("开始批量创建关系...")

        # Group relationships by type.
        rels_by_type = defaultdict(list)
        for src_id, dst_id, rel_type, properties in self.relationships:
            rels_by_type[rel_type].append({
                "src": src_id,
                "dst": dst_id,
                "properties": properties
            })

        # MERGE each relationship type in batches of self.batch_size.
        for rel_type, rels in rels_by_type.items():
            # Relationship types cannot be parameterized; interpolate a
            # quoted form.
            safe_type = self._quote_identifier(rel_type)
            for i in range(0, len(rels), self.batch_size):
                batch = rels[i:i + self.batch_size]
                query = f"""
                UNWIND $rels AS rel
                MATCH (src {{id: rel.src, graph_id: $graph_id}})
                MATCH (dst {{id: rel.dst, graph_id: $graph_id}})
                MERGE (src)-[r:{safe_type} {{graph_id: $graph_id}}]->(dst)
                SET r += rel.properties
                """
                try:
                    session.run(query, {"rels": batch, "graph_id": graph_id})
                except Neo4jError as e:
                    logger.error(f"批量创建{rel_type}关系时发生错误: {e}")

        logger.info(f"关系创建完成，共 {len(self.relationships)} 个关系")

    def build_neo4j_graph(self, graph_id):
        """
        Flush the in-memory graph structure to the Neo4j database.

        Replaces any existing graph stored under the same graph_id.

        Args:
            graph_id: Graph ID.
        """
        with self.driver.session() as session:
            # Remove any previous incarnation of this graph_id.
            self._clear_existing_graph(session, graph_id)

            # Create indexes (subclass hook).
            self._create_indices(session)

            # Bulk-write nodes, then relationships (which MATCH the nodes).
            self._batch_create_nodes(session, graph_id)
            self._batch_create_relationships(session, graph_id)

    def get_pod_interfaces(self) -> Dict[str, List[str]]:
        """
        Map Pods to the interfaces they expose under the active_graph_id.

        Returns a dict keyed by Pod name whose values are the interface
        paths that Pod provides, e.g.::

            {
                "pod-order-service": ["/api/v1/orders", "/api/v2/payments"],
                "pod-user-service": ["/api/v1/users"]
            }

        Raises:
            ValueError: If active_graph_id is not set.
        """
        if not self.active_graph_id:
            logger.error("active_graph_id 未设置，无法执行 get_pod_interfaces")
            raise ValueError("active_graph_id is not set for querying pod interfaces.")

        query = """
        MATCH (pod:Container {graph_id: $g_id})-[:PROVIDES]->(interface:Service {graph_id: $g_id})
        RETURN pod.id AS pod, COLLECT(DISTINCT interface.id) AS interfaces
        """
        result = self.execute_cypher(query, params={"g_id": self.active_graph_id})
        return {record["pod"]: record["interfaces"] for record in result}

    def get_interface_dependencies(self, reverse: bool = False) -> Dict[str, List[str]]:
        """
        Build the interface call-dependency map under the active_graph_id.

        Args:
            reverse: When True, return reverse dependencies (callers of
                each interface) instead of callees.

        Raises:
            ValueError: If active_graph_id is not set.
        """
        if not self.active_graph_id:
            logger.error("active_graph_id 未设置，无法执行 get_interface_dependencies")
            raise ValueError("active_graph_id is not set for querying interface dependencies.")

        if reverse:
            query = """
            MATCH (src:Service {graph_id: $g_id})-[:CALLS]->(dst:Service {graph_id: $g_id})
            RETURN dst.id AS interface, COLLECT(DISTINCT src.id) AS dependencies
            """
        else:
            query = """
            MATCH (src:Service {graph_id: $g_id})-[:CALLS]->(dst:Service {graph_id: $g_id})
            RETURN src.id AS interface, COLLECT(DISTINCT dst.id) AS dependencies
            """

        result = self.execute_cypher(query, params={"g_id": self.active_graph_id})
        return {record["interface"]: record["dependencies"] for record in result}

    def execute_cypher(self, query: str, params: dict = None) -> List[dict]:
        """
        Execute a Cypher query and return the result rows as dicts.

        Raises:
            Neo4jError: Re-raised after logging if the query fails.
        """
        with self.driver.session() as session:
            try:
                result = session.run(query, params or {})
                return [dict(record) for record in result]
            except Neo4jError as e:
                logger.error(f"Cypher execution failed: {e}")
                raise
