import logging
import numpy as np
import networkx as nx
from py2neo import Graph

# Module-level logging configuration: timestamped records tagged with the
# logger name so downstream consumers can filter on "EduPageRankCalculator".
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("EduPageRankCalculator")


class EduPageRankCalculator:
    """Education-weighted PageRank over knowledge-point nodes stored in Neo4j.

    Pipeline (see :meth:`run`):
      1. ``fetch_data`` — load KnowledgePoint/SubKnowledgePoint nodes and the
         edges between them;
      2. ``compute_edu_pagerank`` — rank nodes with a static-weight-modified
         PageRank power iteration;
      3. ``write_results_to_neo4j`` — persist scores and a "core knowledge
         point" flag back to Neo4j.
    """

    def __init__(self, neo4j_uri: str, neo4j_user: str, neo4j_password: str):
        """Open a Neo4j connection and fail fast if it is unusable.

        Raises:
            Exception: whatever py2neo raises when the server is unreachable
                or authentication fails (logged at CRITICAL, then re-raised).
        """
        try:
            self.graph = Graph(neo4j_uri, auth=(neo4j_user, neo4j_password))
            self.graph.run("RETURN 1")  # trivial round-trip to verify the connection
            logger.info("Neo4j 连接成功")
        except Exception as e:
            logger.critical("Neo4j连接失败: %s", e)
            raise

        # business id -> {"class_hours", "syllabus_mentions", "bloom_level", "name", ...}
        self.nodes = {}
        # (source_id, target_id) pairs between successfully loaded nodes
        self.edges = []

    def fetch_data(self):
        """Load all core knowledge-point nodes and the edges between them.

        A node's business ID is ``kp_id`` (KnowledgePoint) or ``sub_kp_id``
        (SubKnowledgePoint).  The Cypher CASE has no ELSE branch, so a node
        missing its ID property yields NULL; such records are skipped here
        because a ``None`` key can never be matched on write-back and
        previously leaked into the graph, causing KeyErrors downstream.
        Edges are kept only when both endpoints were actually loaded.
        """
        node_query = """
        MATCH (n)
        WHERE n:KnowledgePoint OR n:SubKnowledgePoint
        RETURN 
            CASE 
                WHEN n:KnowledgePoint THEN n.kp_id
                WHEN n:SubKnowledgePoint THEN n.sub_kp_id
            END AS id,
            coalesce(n.class_hours, 1) AS class_hours,
            coalesce(n.syllabus_mentions, 1) AS syllabus_mentions,
            coalesce(n.bloom_level, 1) AS bloom_level,
            coalesce(n.title, n.name, n.kp_id, n.sub_kp_id, "节点"+toString(id(n))) AS name
        """
        for record in self.graph.run(node_query):
            node_id = record["id"]
            if node_id is None:
                # Missing kp_id / sub_kp_id: unusable as a business key.
                logger.warning("跳过缺少业务ID的节点: %s", record["name"])
                continue
            self.nodes[node_id] = {
                "class_hours": float(record["class_hours"]),
                "syllabus_mentions": int(record["syllabus_mentions"]),
                # Bloom taxonomy has six levels; clamp dirty data to L6.
                "bloom_level": min(int(record["bloom_level"]), 6),
                "name": record["name"]
            }

        # Edge query: only relationships between the two knowledge-point labels.
        edge_query = """
        MATCH (a)-[r]->(b)
        WHERE (a:KnowledgePoint OR a:SubKnowledgePoint)
          AND (b:KnowledgePoint OR b:SubKnowledgePoint)
        RETURN 
            CASE 
                WHEN a:KnowledgePoint THEN a.kp_id
                WHEN a:SubKnowledgePoint THEN a.sub_kp_id
            END AS source,
            CASE 
                WHEN b:KnowledgePoint THEN b.kp_id
                WHEN b:SubKnowledgePoint THEN b.sub_kp_id
            END AS target
        """
        for record in self.graph.run(edge_query):
            source, target = record["source"], record["target"]
            # Drop edges touching nodes we did not load (NULL or skipped IDs);
            # they would otherwise inject unknown nodes into the ranking graph.
            if source in self.nodes and target in self.nodes:
                self.edges.append((source, target))
        logger.info("加载节点数: %d, 边数: %d", len(self.nodes), len(self.edges))

    def compute_edu_pagerank(self, alpha=0.85, max_iter=100, tol=1e-6):
        """Rank nodes with a PageRank variant weighted by teaching metrics.

        Each node gets a static weight from normalized metrics:
          - ``class_hours`` scaled by the maximum observed value to [0, 1];
          - ``syllabus_mentions`` log-scaled (log1p) to damp outliers;
          - ``bloom_level`` mapped to ``(7 - level) / 6`` so L1 -> 1.0 and
            L6 -> ~0.1667 (lower cognitive levels count as more fundamental).
        Static weight = 0.5 * hours + 0.3 * syllabus + 0.2 * bloom; every
        predecessor's PageRank contribution is multiplied by its static
        weight, and the rank vector is renormalized to sum to 1 each
        iteration.  Results are stored as ``edu_pr`` on ``self.nodes``.

        Args:
            alpha: damping factor of the PageRank recurrence.
            max_iter: hard cap on power iterations.
            tol: L-infinity convergence threshold between iterations.
        """
        G = nx.DiGraph()
        G.add_nodes_from(self.nodes.keys())
        # Only add edges between known nodes: add_edges_from silently creates
        # missing endpoints, and self.nodes[node] below would then raise
        # KeyError for any node that came in only via an edge.
        G.add_edges_from(
            (s, t) for s, t in self.edges if s in self.nodes and t in self.nodes
        )
        N = G.number_of_nodes()
        if N == 0:
            logger.error("无节点数据")
            return

        # `or 1` guards the degenerate all-zero case for both normalizers.
        max_syllabus = max(data["syllabus_mentions"] for data in self.nodes.values()) or 1
        max_hours = max(data["class_hours"] for data in self.nodes.values()) or 1

        for node in G.nodes:
            data = self.nodes[node]
            # Log scaling keeps a few heavily-mentioned topics from dominating.
            syllabus_norm = np.log1p(data["syllabus_mentions"]) / np.log1p(max_syllabus)
            hours_norm = data["class_hours"] / max_hours if max_hours else 0
            bloom_norm = (7 - data["bloom_level"]) / 6.0
            static_weight = float(0.5 * hours_norm + 0.3 * syllabus_norm + 0.2 * bloom_norm)
            G.nodes[node]["static_weight"] = static_weight
            G.nodes[node]["edu_pr"] = 1.0 / N  # uniform start vector

        # Out-degrees are loop-invariant; hoist them out of the iteration.
        # Dangling nodes map to 1 to avoid division by zero (same behavior
        # as the original inline conditional).
        out_deg = {n: G.out_degree(n) or 1 for n in G.nodes}

        for iter_count in range(max_iter):
            new_pr = {
                n: (1 - alpha) / N + alpha * sum(
                    G.nodes[pred]["static_weight"] * G.nodes[pred]["edu_pr"] / out_deg[pred]
                    for pred in G.predecessors(n)
                )
                for n in G.nodes
            }
            # Renormalize; total > 0 is guaranteed by the (1 - alpha) / N term.
            total = sum(new_pr.values())
            for n in new_pr:
                new_pr[n] /= total
            diff = max(abs(new_pr[n] - G.nodes[n]["edu_pr"]) for n in G.nodes)
            for n in G.nodes:
                G.nodes[n]["edu_pr"] = new_pr[n]
            logger.info("Iter %d | Diff: %.8f", iter_count + 1, diff)
            if diff < tol:
                logger.info("算法收敛")
                break

        # Copy the converged scores back onto the node records.
        for node in G.nodes:
            self.nodes[node]["edu_pr"] = G.nodes[node]["edu_pr"]

    def write_results_to_neo4j(self):
        """Persist edu-PageRank scores and mark the top 20% as core nodes.

        Sets ``edu_pagerank`` (rounded to 6 decimals) and ``is_core_kp``
        on every matched node via a single UNWIND batch update.
        """
        # Sort all PageRank scores descending to find the 20% cutoff value.
        pr_values = sorted((v["edu_pr"] for v in self.nodes.values()), reverse=True)

        # Threshold is the score of the node sitting at the 20th-percentile cut.
        threshold_index = max(int(len(pr_values) * 0.2) - 1, 0)
        threshold_value = pr_values[threshold_index] if pr_values else 0

        logger.info("核心知识点阈值: %.6f", threshold_value)

        # Build the batch payload, including the core-knowledge-point flag.
        data = [
            {
                "id": node_id,
                "edu_pr": round(info["edu_pr"], 6),
                "is_core_kp": info["edu_pr"] >= threshold_value,
            }
            for node_id, info in self.nodes.items()
        ]

        # Match each row back to its node by label-specific business ID.
        query = """
        UNWIND $data AS row
        MATCH (n)
        WHERE ( (n:KnowledgePoint AND n.kp_id = row.id)
                OR (n:SubKnowledgePoint AND n.sub_kp_id = row.id) )
        SET n.edu_pagerank = row.edu_pr,
            n.is_core_kp = row.is_core_kp
        """

        self.graph.run(query, data=data)
        logger.info("教育PageRank及核心标志写入完成")

    def run(self):
        """Execute the full pipeline: fetch -> rank -> write back."""
        self.fetch_data()
        self.compute_edu_pagerank()
        self.write_results_to_neo4j()


if __name__ == "__main__":
    # Entry point: connect to the local Neo4j instance and run the pipeline.
    calculator = EduPageRankCalculator(
        neo4j_uri="bolt://localhost:7687",
        neo4j_user="neo4j",
        neo4j_password="123456789",
    )
    calculator.run()
