import pandas as pd
from py2neo import Graph, Node, Relationship, Transaction
from typing import Dict
import logging
import warnings

# Suppress Py2neo deprecation warnings.
# NOTE: this filters DeprecationWarning globally (no module= restriction),
# so deprecation warnings from other libraries are hidden as well.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Module-wide logging: timestamped INFO-level messages under the
# "EduKGBuilder" logger name.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("EduKGBuilder")


class EduKGBuilder:
    """Build an educational knowledge graph in Neo4j from CSV inputs.

    Graph shape: (Course)-[:HAS_CHAPTER]->(Chapter)-[:HAS_KNOWLEDGE]->
    (KnowledgePoint)<-[:CHILD_OF]-(SubKnowledgePoint), plus Lab nodes linked
    to the knowledge points they require, and NEXT_* relationships used for
    learning-path (A*) search.
    """

    # Default directory holding the input CSV files; callers may override
    # per call via load_data(data_dir=...).
    DEFAULT_DATA_DIR = 'D:/Pycharm/LunWen/AKnowledgeGrape/D-Data'

    def __init__(self, neo4j_uri: str, neo4j_user: str, neo4j_password: str):
        """Open and verify the Neo4j connection, then initialise node caches.

        Raises:
            Exception: whatever py2neo raises when the connection test fails.
        """
        self.graph = Graph(neo4j_uri, auth=(neo4j_user, neo4j_password))
        self._validate_connection()

        # Caches keyed by business id so relationships can be wired against
        # the in-transaction Node objects without querying the server (the
        # nodes are not visible outside the open transaction anyway).
        self.chapter_cache: Dict[str, Node] = {}
        self.kp_cache: Dict[str, Node] = {}
        self.sub_kp_cache: Dict[str, Node] = {}

    def _validate_connection(self):
        """Verify the Neo4j connection with a trivial query; re-raise on failure."""
        try:
            self.graph.run("RETURN 1")
            logger.info("Neo4j 连接成功")
        except Exception as e:
            logger.critical(f"Neo4j连接失败: {str(e)}")
            raise

    def load_data(self, data_dir: str = DEFAULT_DATA_DIR) -> Dict[str, pd.DataFrame]:
        """Load and pre-process the four CSV inputs.

        Args:
            data_dir: directory containing the CSV files. Defaults to
                DEFAULT_DATA_DIR, preserving the original hard-coded path.

        Returns:
            Mapping of dataset name to its cleaned DataFrame.

        Raises:
            ValueError: if a CSV is missing one of its required columns.
            Exception: any pandas read error is logged and re-raised.
        """
        required_files = {
            'chapters': (f'{data_dir}/chapters.csv', ['chapter_id', 'title', 'order', 'class_hours']),
            'knowledge_points': (f'{data_dir}/knowledge_points.csv', ['kp_id', 'chapter_id', 'description', 'bloom_level', 'class_hours', 'syllabus_mentions']),
            'sub_knowledge_points': (f'{data_dir}/sub_knowledge_points.csv', ['sub_kp_id', 'kp_id', 'title', 'description', 'bloom_level', 'dependency_sub_kp', 'class_hours', 'syllabus_mentions', 'chapter_id']),
            'labs': (f'{data_dir}/labs.csv', ['lab_id', 'lab_name', 'related_kp'])
        }

        data = {}
        for name, (path, cols) in required_files.items():
            try:
                df = pd.read_csv(path)
                if not set(cols).issubset(df.columns):
                    raise ValueError(f"{name} CSV 缺少必要列")
                # Normalise bloom_level to int; anything that is not a plain
                # digit string (blanks, negatives, text) falls back to 1.
                if name in ['knowledge_points', 'sub_knowledge_points']:
                    df['bloom_level'] = df['bloom_level'].apply(
                        lambda x: int(str(x).strip()) if str(x).strip().isdigit() else 1)
                # Column-wise Series.map replaces DataFrame.applymap (deprecated
                # since pandas 2.1); the elementwise result is identical.
                data[name] = df.fillna('').apply(
                    lambda col: col.map(lambda x: x.strip() if isinstance(x, str) else x))
                logger.info(f"成功加载 {name}: {len(df)} 条")
            except Exception as e:
                logger.error(f"加载 {name} 失败: {str(e)}")
                raise
        return data

    def build(self):
        """Wipe the database and rebuild the whole graph in one transaction.

        On any failure the transaction is rolled back and the exception is
        re-raised (note: the initial data wipe happens outside the transaction
        and is NOT undone by the rollback).
        """
        tx = None
        try:
            self._clear_existing_data()
            data = self.load_data()
            tx = self.graph.begin()

            self._build_chapters(tx, data['chapters'])
            self._build_course(tx)
            self._build_knowledge_points(tx, data['knowledge_points'])
            self._build_sub_knowledge_points(tx, data['sub_knowledge_points'])
            self._build_labs(tx, data['labs'])

            # Densify intra-chapter links for propagation / PageRank.
            self._enhance_chapter_associations(tx)
            # NEXT_* relationships consumed by the learning-path search.
            self._build_path_relationships(tx)

            tx.commit()
            logger.info("全量构建完成")
        except Exception as e:
            if tx:
                tx.rollback()
            logger.critical(f"构建流程异常: {str(e)}")
            raise

    def _clear_existing_data(self):
        """Delete every node and relationship currently in the database."""
        try:
            self.graph.run("MATCH (n) DETACH DELETE n")
            logger.info("已清理历史数据")
        except Exception as e:
            logger.error(f"清理失败: {str(e)}")
            raise

    def _build_chapters(self, tx: Transaction, chapters: pd.DataFrame):
        """Create one Chapter node per CSV row and cache it by chapter_id.

        Rows with an empty chapter_id are skipped with an error log; a
        non-numeric class_hours aborts the whole build (float() raises).
        """
        try:
            for _, row in chapters.iterrows():
                if not row['chapter_id']:
                    logger.error("发现空 chapter_id，跳过")
                    continue
                chapter = Node("Chapter",
                               id=row['chapter_id'],
                               chapter_id=row['chapter_id'],
                               title=row['title'],
                               order=row['order'],
                               class_hours=float(row['class_hours']))
                tx.create(chapter)
                self.chapter_cache[row['chapter_id']] = chapter
            logger.info(f"章节构建完成: {len(chapters)} 个")
        except Exception as e:
            logger.error(f"章节构建失败: {str(e)}")
            raise

    def _build_course(self, tx: Transaction):
        """Create the single Course node and link it to every cached chapter."""
        try:
            course_node = Node("Course",
                               id="COURSE001",
                               course_id="COURSE001",
                               name="大数据技术",
                               credit=3,
                               semester="2024春季")
            tx.create(course_node)
            for chapter in self.chapter_cache.values():
                tx.create(Relationship(course_node, "HAS_CHAPTER", chapter))
            logger.info("课程节点关联完成")
        except Exception as e:
            logger.error(f"课程构建失败: {str(e)}")
            raise

    def _build_knowledge_points(self, tx: Transaction, kps: pd.DataFrame):
        """Create KnowledgePoint nodes and link each to its chapter."""
        try:
            for _, row in kps.iterrows():
                if not row['kp_id'] or not row['chapter_id']:
                    logger.error(f"无效知识点数据: {row.to_dict()}")
                    continue
                kp = Node("KnowledgePoint",
                          id=row['kp_id'],
                          kp_id=row['kp_id'],
                          chapter_id=row['chapter_id'],
                          title=row['description'],
                          bloom_level=row['bloom_level'],
                          class_hours=row['class_hours'],
                          syllabus_mentions=row['syllabus_mentions'])
                tx.create(kp)
                self.kp_cache[row['kp_id']] = kp

                chapter = self.chapter_cache.get(row['chapter_id'])
                if not chapter:
                    # Node was still created above; it just stays unlinked.
                    logger.error(f"章节不存在: chapter_id={row['chapter_id']}，知识点 {row['kp_id']} 未关联")
                    continue
                tx.create(Relationship(chapter, "HAS_KNOWLEDGE", kp))
            logger.info(f"知识点构建完成: {len(kps)} 个")
        except Exception as e:
            logger.error(f"知识点构建失败: {str(e)}")
            raise

    def _build_sub_knowledge_points(self, tx: Transaction, sub_kps: pd.DataFrame):
        """Create SubKnowledgePoint nodes, then wire parent/child and dependency links.

        Two passes: the first creates every node (so the second can resolve
        forward references in dependency_sub_kp), the second creates
        CHILD_OF / PARENT_OF plus either REQUIRES_PREREQUISITE edges or a
        fallback ASSOCIATED_WITH edge when no dependency is listed.
        """
        try:
            # Pass 1: create all sub-knowledge-point nodes.
            for _, row in sub_kps.iterrows():
                if not row['kp_id'] or not row['chapter_id']:
                    logger.error(f"子知识点 {row['sub_kp_id']} 缺少必要字段，跳过")
                    continue
                sub_kp = Node("SubKnowledgePoint",
                              id=row['sub_kp_id'],
                              sub_kp_id=row['sub_kp_id'],
                              chapter_id=row['chapter_id'],
                              title=row['title'],
                              description=row['description'],
                              bloom_level=row['bloom_level'],
                              class_hours=row['class_hours'],
                              syllabus_mentions=row['syllabus_mentions'])
                tx.create(sub_kp)
                self.sub_kp_cache[row['sub_kp_id']] = sub_kp

                chapter = self.chapter_cache.get(row['chapter_id'])
                if chapter:
                    tx.create(Relationship(sub_kp, "BELONGS_TO_CHAPTER", chapter))
                else:
                    logger.warning(f"章节不存在: chapter_id={row['chapter_id']}")
            # Pass 2: parent/child and dependency relationships.
            for _, row in sub_kps.iterrows():
                parent_kp = self.kp_cache.get(row['kp_id'])
                if not parent_kp:
                    logger.error(f"父知识点不存在: kp_id={row['kp_id']}，子知识点 {row['sub_kp_id']} 未关联")
                    continue
                sub_kp = self.sub_kp_cache[row['sub_kp_id']]
                tx.create(Relationship(sub_kp, "CHILD_OF", parent_kp))
                tx.create(Relationship(parent_kp, "PARENT_OF", sub_kp))
                dependency_ids = str(row['dependency_sub_kp']).split(';')
                # any() is False exactly when the column was empty (split of ''
                # yields ['']), i.e. the sub-KP has no listed prerequisites.
                if not any(dependency_ids):
                    tx.create(Relationship(sub_kp, "ASSOCIATED_WITH", parent_kp))
                else:
                    for dep_id in filter(None, [x.strip() for x in dependency_ids]):
                        if dep_id in self.sub_kp_cache:
                            tx.create(Relationship(sub_kp, "REQUIRES_PREREQUISITE", self.sub_kp_cache[dep_id]))
                        else:
                            logger.warning(f"依赖的子知识点不存在: {dep_id}")
            logger.info(f"子知识点构建完成: {len(sub_kps)} 个")
        except Exception as e:
            logger.error(f"子知识点构建失败: {str(e)}")
            raise

    def _build_labs(self, tx: Transaction, labs: pd.DataFrame):
        """Create Lab nodes and link each to the knowledge points it requires."""
        try:
            for _, row in labs.iterrows():
                lab = Node("Lab",
                           id=row['lab_id'],
                           lab_id=row['lab_id'],
                           title=row['lab_name'])
                tx.create(lab)
                # Filter empty entries up front so the per-lab log below
                # reports the true number of associations (splitting an empty
                # related_kp yields [''], which previously counted as 1).
                kp_ids = [x.strip() for x in str(row.get('related_kp', '')).split(';') if x.strip()]
                for kp_id in kp_ids:
                    kp = self.kp_cache.get(kp_id)
                    if kp:
                        tx.create(Relationship(lab, "REQUIRES_KNOWLEDGE", kp))
                    else:
                        logger.warning(f"实验 {row['lab_id']} 关联不存在的知识点: {kp_id}")
                logger.info(f"实验环节构建完成，含 {len(kp_ids)} 个知识点关联")
            logger.info(f"实验环节构建完成: {len(labs)} 个")
        except Exception as e:
            logger.error(f"实验构建失败: {str(e)}")
            raise

    def _enhance_chapter_associations(self, tx: Transaction):
        """Fully connect knowledge points (and sub knowledge points) sharing a
        chapter with RELATED_TO edges, strengthening propagation / PageRank.

        The previous per-pair graph.exists() check was removed: it ran in a
        separate auto-commit transaction and could never see the uncommitted
        nodes created in `tx`, so it always returned False while costing one
        server round trip per pair. The database is wiped before each build
        and each unordered pair is visited exactly once, so duplicates cannot
        occur anyway.
        """
        for chapter_id in self.chapter_cache:
            # All-pairs links between this chapter's knowledge points.
            chapter_kps = [kp for kp in self.kp_cache.values() if kp["chapter_id"] == chapter_id]
            for i in range(len(chapter_kps)):
                for j in range(i + 1, len(chapter_kps)):
                    tx.create(Relationship(chapter_kps[i], "RELATED_TO", chapter_kps[j]))
            # All-pairs links between this chapter's sub knowledge points.
            chapter_sub_kps = [sub for sub in self.sub_kp_cache.values() if sub["chapter_id"] == chapter_id]
            for i in range(len(chapter_sub_kps)):
                for j in range(i + 1, len(chapter_sub_kps)):
                    tx.create(Relationship(chapter_sub_kps[i], "RELATED_TO", chapter_sub_kps[j]))
        logger.info("章节内知识点关联增强完成")

    def _build_path_relationships(self, tx: Transaction):
        """Create the NEXT_* relationships consumed by learning-path search.

        1. NEXT_CHAPTER: strictly follows the chapters' `order` field.
        2. NEXT_KNOWLEDGE: within a chapter, from lower to higher bloom_level.
        3. NEXT_SUB_KNOWLEDGE: within a parent KP, from lower to higher
           bloom_level.

        NOTE(review): both bloom_level queries use `<=`, so two nodes with the
        SAME bloom_level get NEXT edges in both directions — confirm this is
        the intended behaviour for the A* search.
        """
        # 1. NEXT_CHAPTER: chain chapters in ascending `order`.
        chapter_query = """
        MATCH (c:Chapter)
        WITH c ORDER BY toInteger(c.order) ASC
        WITH collect(id(c)) AS chapter_ids, collect(c) AS chapters
        UNWIND range(0, size(chapters)-2) AS i
        MATCH (c1) WHERE id(c1) = chapter_ids[i]
        MATCH (c2) WHERE id(c2) = chapter_ids[i+1]
        MERGE (c1)-[:NEXT_CHAPTER {weight:1.0}]->(c2)
        """
        tx.run(chapter_query)

        # 2. NEXT_KNOWLEDGE: within one chapter, ordered by bloom_level.
        kp_query = """
        MATCH (ch:Chapter)-[:HAS_KNOWLEDGE]->(kp1:KnowledgePoint),
              (ch)-[:HAS_KNOWLEDGE]->(kp2:KnowledgePoint)
        WHERE kp1 <> kp2 AND toInteger(kp1.bloom_level) <= toInteger(kp2.bloom_level)
        MERGE (kp1)-[:NEXT_KNOWLEDGE {weight: 1.0}]->(kp2)
        """
        tx.run(kp_query)

        # 3. NEXT_SUB_KNOWLEDGE: siblings under the same parent KP.
        sub_kp_query = """
        MATCH (skp1:SubKnowledgePoint)-[:CHILD_OF]->(kp:KnowledgePoint),
              (skp2:SubKnowledgePoint)-[:CHILD_OF]->(kp)
        WHERE skp1 <> skp2 
          AND toInteger(skp1.bloom_level) <= toInteger(skp2.bloom_level)
        MERGE (skp1)-[:NEXT_SUB_KNOWLEDGE {weight: 1.0}]->(skp2)
        """
        tx.run(sub_kp_query)

        logger.info("路径关系构建完成")

if __name__ == "__main__":
    import os

    # Read connection settings from the environment when available so the
    # credentials need not live in source control; the defaults reproduce
    # the original hard-coded values, keeping behaviour unchanged.
    builder = EduKGBuilder(
        neo4j_uri=os.environ.get("NEO4J_URI", "bolt://localhost:7687"),
        neo4j_user=os.environ.get("NEO4J_USER", "neo4j"),
        neo4j_password=os.environ.get("NEO4J_PASSWORD", "123456789")
    )
    builder.build()
