import os
import jieba
import numpy as np
import threading
import json
import re
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.document_loaders import Docx2txtLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from openai import OpenAI
from typing import List, Dict, Tuple, Optional
from neo4j import GraphDatabase
from concurrent.futures import ThreadPoolExecutor, as_completed

# Environment configuration: force HuggingFace libraries into offline mode
# so the embedding model is loaded only from the local cache (no downloads).
os.environ['TRANSFORMERS_OFFLINE'] = '1'
os.environ['HF_DATASETS_OFFLINE'] = '1'

# --- User configuration section ---

# Configure all chapter file paths here.
# Note: backslashes (\) in Windows paths should be doubled (\\) or written
# as forward slashes (/).
KNOWLEDGE_FILES = [
    "D:/桌面/大数据应用基础教程/第一章大数据概述.txt",
    "D:/桌面/大数据应用基础教程/第二章python及常用类库.txt",
    "D:/桌面/大数据应用基础教程/第三章数据获取.txt",
    "D:/桌面/大数据应用基础教程/第四章数据存储.txt",
    "D:/桌面/大数据应用基础教程/第五章数据预处理.txt",
    "D:/桌面/大数据应用基础教程/第六章数据可视化.txt",
    "D:/桌面/大数据应用基础教程/第七章数据分析方法.txt",
    "D:/桌面/大数据应用基础教程/第八章linux操作系统基础.txt",
    "D:/桌面/大数据应用基础教程/第九章大数据管理平台.txt",
    "D:/桌面/大数据应用基础教程/第十章分布式存储.txt",
    "D:/桌面/大数据应用基础教程/第十一章分布式处理.txt"
]

# Local filesystem path of the HuggingFace embedding model.
EMBEDDING_MODEL_PATH = "D:/local_models/all-MiniLM-L6-v2"

# API key and base URL for the LLM service
# (example uses Alibaba Cloud Dashscope's OpenAI-compatible endpoint).
# NOTE(review): a live-looking API key is hard-coded in source control —
# it should be revoked and loaded from an environment variable instead.
API_KEY = "sk-faa84a6e39ed4a78a90b49b8fb811bfc"
BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
MODEL_NAME = "deepseek-v3"  # chat model used for generation, extraction and scoring

# Neo4j database connection settings.
NEO4J_URI = "bolt://localhost:7687"
NEO4J_USER = "neo4j"
NEO4J_PASSWORD = "12345678"
DATABASE_NAME = "tiaozhanbei"

# Exam settings: number of questions and total score.
TOTAL_QUESTIONS = 5
TOTAL_SCORE = 50

# --- Core program code ---

# Cache directory (absolute path) for vector stores and KG cache files.
CACHE_DIR = "C:/temp/tiaozhanbei_cache"
os.makedirs(CACHE_DIR, exist_ok=True)


class KnowledgeBaseProcessor:
    """Loads chapter documents, splits them into chunks, builds/caches a
    FAISS vector store, and extracts TF-IDF keywords that drive question
    generation."""

    def __init__(self, knowledge_files: list, vector_store_path: str):
        """
        Args:
            knowledge_files: paths of .txt/.docx chapter files to load.
            vector_store_path: directory used to cache the FAISS index
                (chapter-specific so caches do not collide).
        """
        self.knowledge_files = knowledge_files
        self.vector_store = None   # FAISS index, built lazily
        self.documents = []        # loaded langchain Document objects
        self.keywords = None       # {"core": [...], "extended": [...]}
        self.vector_store_path = vector_store_path  # dynamic, per-chapter path
        # Tokens dropped during keyword extraction: Chinese/English function
        # words, whitespace, punctuation (fullwidth + halfwidth) and digits.
        self.stopwords = {
            # Chinese function words
            "的", "地", "得", "了", "也", "在", "和", "等", "是", "从", "中", "以", "与",
            "后", "上", "时", "到", "该", "根据", "就", "将", "要", "会", "并", "或", "通过", "使用",
            "所有", "访问", "输入", "参数", "用户", "命令", "不同", "计算", "章", "解析", "页面", "内容",
            "知识图谱",
            # English function words
            "a", "an", "the", "data",
            # Whitespace characters
            " ", "\n", "\t", "\u3000", "",
            # Punctuation (fullwidth and halfwidth)
            "`", "~", "!", "！", "@", "#", "$", "%", "^", "&", "*", "(", ")", "（", "）",
            "-", "—", "_", "=", "+", "[", "]", "【", "】", "{", "}", "<", ">", "《", "》",
            ",", "，", ".", "。", ";", "；", ":", "：", "?", "？", "/", "\\", "|",
            "\"", "'", "‘", "’", "“", "”", "、",
            # Digits
            "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"
        }
        self.tfidf_vectorizer = TfidfVectorizer(
            tokenizer=self._jieba_tokenize,
            stop_words=list(self.stopwords),
            min_df=1  # keep terms even if they appear in a single document
        )

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Segment Chinese text with jieba, lower-case it, and drop
        stopwords and tokens shorter than 2 characters."""
        tokens = []
        for word in jieba.cut(text):
            cleaned = word.strip()
            if len(cleaned) >= 2 and cleaned.lower() not in self.stopwords:
                tokens.append(cleaned.lower())
        return tokens

    def load_documents(self) -> None:
        """Load every configured file into ``self.documents``.

        .txt files are tried with several encodings in order; .docx files
        go through Docx2txtLoader.

        Raises:
            FileNotFoundError: a configured path does not exist.
            RuntimeError: a file could not be loaded with any encoding.
            ValueError: unsupported file extension.
        """
        self.documents = []
        encodings_to_try = ["utf-8", "gbk", "latin-1"]
        for file_path in self.knowledge_files:
            file_docs = []
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"文件不存在：{file_path}")
            file_path = os.path.normpath(file_path)
            print(f"准备加载文件: {file_path}")

            if file_path.lower().endswith(".docx"):
                try:
                    loader = Docx2txtLoader(file_path)
                    file_docs = loader.load()
                    print(f"成功加载docx文件: {file_path}")
                except Exception as e:
                    raise RuntimeError(f"加载docx文件失败：{file_path}\n错误信息：{str(e)}")
            elif file_path.lower().endswith(".txt"):
                loaded_successfully = False
                for encoding in encodings_to_try:
                    try:
                        loader = TextLoader(file_path, encoding=encoding)
                        file_docs = loader.load()
                        print(f"成功使用 {encoding} 编码加载文件: {file_path}")
                        loaded_successfully = True
                        break
                    except UnicodeDecodeError as e:
                        print(f"{encoding} 编码加载失败: {str(e)}")
                    except Exception as e:
                        print(f"加载txt文件失败（使用{encoding}编码）：{file_path}\n错误信息：{str(e)}")
                if not loaded_successfully:
                    raise RuntimeError(f"加载txt文件失败：{file_path}\n错误信息：所有尝试的编码均失败。")
            else:
                raise ValueError(f"不支持的文件格式：{file_path}")
            self.documents.extend(file_docs)
            print(f"成功加载文件：{file_path}（{len(file_docs)}个文档对象）")
        print(f"共加载{len(self.documents)}个文档对象")

    def split_documents(self) -> List:
        """Split loaded documents into ~300-char overlapping chunks."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=300, chunk_overlap=50, separators=["\n\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Load the cached FAISS index if present, otherwise build and
        persist a new one from the split documents."""
        embeddings = HuggingFaceEmbeddings(
            model_name=EMBEDDING_MODEL_PATH,
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': True}
        )
        if os.path.exists(self.vector_store_path) and os.listdir(self.vector_store_path):
            # allow_dangerous_deserialization is required for FAISS pickle
            # loading; safe here because the cache is produced locally.
            self.vector_store = FAISS.load_local(
                self.vector_store_path, embeddings, allow_dangerous_deserialization=True
            )
            print("已加载缓存的向量存储")
        else:
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, embeddings)
            os.makedirs(self.vector_store_path, exist_ok=True)
            self.vector_store.save_local(self.vector_store_path)
            print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Compute corpus-wide TF-IDF weights and store the top terms in
        ``self.keywords`` ("core": top 20, "extended": next 80).

        Raises:
            ValueError: every document tokenized to nothing.
        """
        if not self.documents:
            self.load_documents()

        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        texts = [text for text in texts if text.strip()]

        if not texts:
            raise ValueError("所有文档分词后无有效内容，无法提取关键词")

        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)
        feature_names = self.tfidf_vectorizer.get_feature_names_out()

        # Each term's aggregate weight across all documents is simply the
        # column sum of the sparse matrix — one vectorized operation instead
        # of the previous O(docs * features) Python double loop.
        total_weights = np.asarray(tfidf_matrix.sum(axis=0)).ravel()
        keywords_with_weights = {
            feature_names[idx]: float(weight)
            for idx, weight in enumerate(total_weights)
            if weight > 0
        }

        sorted_keywords = sorted(keywords_with_weights.items(), key=lambda x: x[1], reverse=True)

        # Python slicing clamps out-of-range bounds, so no length checks are
        # needed: [:20] and [20:100] are correct for any list length.
        self.keywords = {
            "core": [kw for kw, _ in sorted_keywords[:20]],
            "extended": [kw for kw, _ in sorted_keywords[20:100]],
        }
        print("=== 关键词提取结果 ===")
        print(f"核心关键词（{len(self.keywords['core'])}个）：{', '.join(self.keywords['core'])}")
        print(f"拓展关键词（{len(self.keywords['extended'])}个）：{', '.join(self.keywords['extended'])}\n")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to ``query``, lazily
        building the vector store on first use."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)


class KnowledgeGraphProcessor:
    """Builds and queries a Neo4j knowledge graph of (subject, relation,
    object, property) quadruplets extracted from course text by an LLM."""

    def __init__(self, uri: str = NEO4J_URI, user: str = NEO4J_USER, password: str = NEO4J_PASSWORD,
                 database: str = DATABASE_NAME, kg_cache_path: str = None):
        """Connect to Neo4j and prepare the LLM client.

        Args:
            uri, user, password, database: Neo4j connection settings.
            kg_cache_path: optional JSON file caching extracted quadruplets
                between runs.

        Raises:
            RuntimeError: the Neo4j database cannot be reached.
        """
        self.driver = None
        self.uri = uri
        self.user = user
        self.password = password
        self.database = database
        self.kg_cache_path = kg_cache_path  # chapter-specific cache file
        self.client = OpenAI(
            api_key=API_KEY,
            base_url=BASE_URL,
        )
        self._connect_to_db()

    def _connect_to_db(self):
        """Open the Neo4j driver and verify connectivity (fail fast)."""
        try:
            self.driver = GraphDatabase.driver(self.uri, auth=(self.user, self.password))
            self.driver.verify_connectivity()
            print("成功连接到 Neo4j 数据库")
        except Exception as e:
            raise RuntimeError(f"无法连接到 Neo4j 数据库: {e}")

    def close(self):
        """Release the Neo4j driver; safe to call even if connect failed."""
        if self.driver:
            self.driver.close()

    def create_schema(self):
        """Create the uniqueness constraint and type index for Entity nodes."""
        with self.driver.session(database=self.database) as session:
            session.run("CREATE CONSTRAINT IF NOT EXISTS FOR (e:Entity) REQUIRE e.name IS UNIQUE")
            session.run("CREATE INDEX IF NOT EXISTS FOR (e:Entity) ON (e.type)")
            print("Neo4j 模式和索引创建成功")

    def clear_graph(self):
        """Delete every node and relationship in the configured database."""
        with self.driver.session(database=self.database) as session:
            session.run("MATCH (n) DETACH DELETE n")
            print("Neo4j 数据库已清空")

    def _extract_triplets(self, text: str) -> List[Tuple[str, str, str, str]]:
        """Ask the LLM to extract (subject, relation, object, property)
        quadruplets from one text chunk.

        Returns an empty list on any API failure.
        NOTE(review): the response is split on ASCII commas, so entity names
        containing ',' are mis-parsed — tolerable for a Chinese corpus
        (fullwidth '，'), but fragile in general.
        """
        prompt = f"""从以下文本中提取出四元组 (主体, 关系, 客体, 关系属性)。
关系属性应尽可能详细和具体，如果关系属性不明确，可以留空。
示例：
文本：大数据主要特点包括海量性、多样性、价值性和时效性。
四元组：(大数据, 特点, 海量性, "特点的子集"), (大数据, 特点, 多样性, "特点的子集"), (大数据, 特点, 价值性, "特点的子集"), (大数据, 特点, 时效性, "特点的子集")

文本：{text}
请只输出四元组，每个四元组一行，格式为：(主体, 关系, 客体, 关系属性)。"""
        try:
            response = self.client.chat.completions.create(
                model=MODEL_NAME,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.1,  # low temperature for deterministic extraction
            )
            result = response.choices[0].message.content
            quadruplets = []
            for line in result.split('\n'):
                line = line.strip().strip('()')
                if line:
                    parts = line.split(',')
                    if len(parts) >= 3:
                        subject = parts[0].strip()
                        relation = parts[1].strip()
                        object_entity = parts[2].strip()
                        # The property may be absent; strip surrounding quotes.
                        relation_property = parts[3].strip().strip('\"\' ') if len(parts) > 3 else ""
                        quadruplets.append((subject, relation, object_entity, relation_property))
            return quadruplets
        except Exception as e:
            print(f"三元组提取失败: {e}")
            return []

    def _store_quadruplets(self, quadruplets: List[Tuple[str, str, str, str]]) -> None:
        """Merge quadruplets into the graph (shared by cache load and
        online build).

        Relationship *types* cannot be Cypher parameters, so the relation
        name must be interpolated into the query text. Backticks are
        stripped first so LLM output cannot break out of the backquoted
        identifier, and an empty relation falls back to a generic type.
        """
        with self.driver.session(database=self.database) as session:
            for subject, relation, object_entity, relation_property in quadruplets:
                safe_relation = relation.replace("`", "").strip() or "相关"
                cypher_query = """
                    MERGE (s:Entity {name: $subject})
                    MERGE (o:Entity {name: $object})
                    MERGE (s)-[r:`%s`]->(o)
                    SET r.description = $relation_property
                """ % safe_relation
                session.run(cypher_query, subject=subject, object=object_entity,
                            relation_property=relation_property)

    def load_graph_from_cache(self) -> bool:
        """Rebuild the graph from the JSON cache file if one exists.

        Returns:
            True when the cache was found and loaded, False when the graph
            must be built online.
        """
        # Guard against kg_cache_path=None (os.path.exists(None) raises).
        if self.kg_cache_path and os.path.exists(self.kg_cache_path):
            print(f"发现本地知识图谱缓存：{self.kg_cache_path}，正在加载...")
            with open(self.kg_cache_path, 'r', encoding='utf-8') as f:
                triplet_data = json.load(f)
            self.clear_graph()
            self.create_schema()
            self._store_quadruplets([
                (item['s'], item['r'], item['o'], item.get('p', ''))
                for item in triplet_data
            ])
            print(f"已成功从缓存加载并填充知识图谱，共 {len(triplet_data)} 个三元组。")
            return True
        print("未找到本地知识图谱缓存，将进行在线构建。")
        return False

    def populate_graph(self, documents: List, event: threading.Event) -> None:
        """Extract quadruplets from ``documents`` concurrently, store them
        in Neo4j, and persist them to the JSON cache.

        Designed to run in a background thread. ``event`` is always set on
        exit — previously it was only set on the success branch, so waiters
        always ran into their full timeout when extraction yielded nothing
        or failed.
        """
        try:
            self.clear_graph()
            self.create_schema()
            all_triplets = []

            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=2000,
                chunk_overlap=200,
                separators=["\n\n", "。", "，", " "]
            )

            chunks_to_process = []
            for doc in documents:
                chunks_to_process.extend(text_splitter.split_text(doc.page_content))

            print(f"总计 {len(chunks_to_process)} 个文本块需要处理。")

            # Extraction is network-bound, so a small thread pool overlaps
            # the LLM round-trips.
            with ThreadPoolExecutor(max_workers=5) as executor:
                future_to_chunk = {executor.submit(self._extract_triplets, chunk): chunk
                                   for chunk in chunks_to_process}
                for i, future in enumerate(as_completed(future_to_chunk), 1):
                    try:
                        triplets = future.result()
                        if triplets:
                            all_triplets.extend(triplets)
                        print(f"  - 已完成 {i}/{len(chunks_to_process)} 个块的提取，提取 {len(triplets)} 个四元组。")
                    except Exception as e:
                        print(f"块处理失败: {e}")

            if not all_triplets:
                print("未从文档中提取到任何四元组。")
                return

            self._store_quadruplets(all_triplets)
            print(f"成功将 {len(all_triplets)} 个四元组存入 Neo4j 数据库。")

            if self.kg_cache_path:
                triplet_data = [{'s': t[0], 'r': t[1], 'o': t[2], 'p': t[3]} for t in all_triplets]
                parent_dir = os.path.dirname(self.kg_cache_path)
                if parent_dir:  # makedirs('') would raise for bare filenames
                    os.makedirs(parent_dir, exist_ok=True)
                with open(self.kg_cache_path, 'w', encoding='utf-8') as f:
                    json.dump(triplet_data, f, ensure_ascii=False, indent=2)
                print(f"知识图谱数据已保存到缓存文件：{self.kg_cache_path}")
            print("知识图谱后台构建完成！")
        finally:
            event.set()  # unblock waiters on every exit path

    def query_graph(self, query_keywords: List[str]) -> str:
        """Return up to 10 graph relations whose type or endpoint names
        match any keyword (case-insensitive substring match), formatted as
        a text snippet for prompt injection."""
        related_triplets = []
        with self.driver.session(database=self.database) as session:
            cypher_query = """
                MATCH (e:Entity)-[r]->(o:Entity)
                WHERE type(r) IN $keywords OR any(keyword IN $keywords WHERE toLower(e.name) CONTAINS toLower(keyword) OR toLower(o.name) CONTAINS toLower(keyword))
                RETURN e.name AS subject, type(r) AS relation, o.name AS object, r.description AS description
                LIMIT 10
            """
            results = session.run(cypher_query, keywords=query_keywords)
            for record in results:
                related_triplets.append(
                    f"({record['subject']}, {record['relation']}, {record['object']}, \"{record['description']}\")")
        return "知识图谱中相关关系:\n" + "\n".join(related_triplets) if related_triplets else "知识图谱中未找到相关关系。"


# --- 问答和评分模块 ---

class QuestionGenerator:
    """Generates open-ended exam questions from retrieved context, TF-IDF
    keywords and (when available) knowledge-graph relations."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor, kg_processor: KnowledgeGraphProcessor,
                 kg_ready_event: threading.Event):
        """
        Args:
            knowledge_processor: supplies keywords and similar chunks.
            kg_processor: supplies related graph relations.
            kg_ready_event: set once the background KG build has finished.
        """
        self.kb_processor = knowledge_processor
        self.kg_processor = kg_processor
        self.kg_ready_event = kg_ready_event
        self.client = OpenAI(
            api_key=API_KEY,
            base_url=BASE_URL,
        )
        self.generated_questions = []  # history of questions produced
        self.backup_questions = []     # optional canned fallbacks
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the two prompt templates: the primary one (with KG info
        and forbidden words) and the backup one (context/keywords only)."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords", "forbidden_words", "graph_info"],
            template="""基于以下上下文、关键词和知识图谱信息，生成一个高质量的开放性问题：
关键词：{keywords}
上下文：{context}
知识图谱中相关关系：{graph_info}
要求：
1. 问题必须与提供的上下文、关键词和知识图谱信息紧密相关
2. 禁止出现这些词：{forbidden_words}
3. 问题是开放性的，没有唯一标准答案，需要思考和分析
4. 问题应鼓励多角度思考、批判性分析或创造性解决方案
5. 问题应具有一定深度，能够考察对知识的深入理解和应用能力
6. 问题可以涉及比较、评价、分析原因、提出建议或预测趋势等
7. 问题表述清晰，避免简单的事实性询问
8. 每次输出题目时不要输出解析
9. 禁止出现任何与图表、代码相关的词汇，如"图例"、"源代码"、"图3.2"等
生成的问题："""
        )
        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成一个高质量的开放性问题：
关键词：{keywords}
上下文：{context}
要求：
1. 问题必须与提供的上下文和关键词紧密相关
2. 问题是开放性的，没有唯一标准答案，需要思考和分析
3. 问题应鼓励多角度思考、批判性分析或创造性解决方案
4. 问题应具有一定深度，能够考察对知识的深入理解和应用能力
5. 问题可以涉及比较、评价、分析原因、提出建议或预测趋势等
6. 问题表述清晰，避免简单的事实性询问
7. 每次输出题目时不要输出解析
8. 禁止出现任何与图表、代码相关的词汇，如"图例"、"源代码"、"图3.2"等
生成的问题："""
        )

    def generate_question(self, difficulty: str = "medium") -> str:
        """Generate one open-ended question at the given difficulty
        ("easy" / "medium" / "hard"; anything else falls back to medium).

        Returns the cleaned question text; on failure, a backup question
        or a static error string.
        """
        # 1. Make sure keywords are available.
        if not self.kb_processor.keywords: self.kb_processor.extract_keywords()
        core_keywords = self.kb_processor.keywords["core"]
        extended_keywords = self.kb_processor.keywords["extended"]

        # 2. Difficulty controls how many keywords seed the retrieval and
        #    which words are forbidden in the generated question.
        if difficulty == "easy":
            keywords_for_search = core_keywords[:5]
            forbidden_words = extended_keywords
            print("正在生成“简单”难度问题...")
        elif difficulty == "medium":
            keywords_for_search = core_keywords[:10]
            forbidden_words = []
            print("正在生成“中等”难度问题...")
        elif difficulty == "hard":
            keywords_for_search = core_keywords[:15]
            forbidden_words = []
            print("正在生成“困难”难度问题...")
        else:
            keywords_for_search = core_keywords[:10]
            forbidden_words = []
            print("难度选择无效，已默认为“中等”难度。")

        query = " ".join(keywords_for_search)
        relevant_docs = self.kb_processor.retrieve_relevant_docs(query, top_k=5)
        context = " ".join([doc.page_content for doc in relevant_docs])

        # 3. Warm-up KG query in the background.
        # NOTE(review): its result is discarded and query_graph is called
        # again synchronously below — presumably intended as a cache
        # warm-up; the event also doubles as the "graph built" signal.
        kg_thread = threading.Thread(target=self._query_kg_in_background, args=(keywords_for_search,))
        kg_thread.start()

        # 4. Wait up to 5s for the KG (build or warm-up) to complete.
        self.kg_ready_event.wait(timeout=5)
        kg_info = self.kg_processor.query_graph(keywords_for_search)

        # 5. Ask the LLM for a question, preferring the KG-aware prompt.
        try:
            if kg_info and "未找到" not in kg_info:
                prompt = self.question_prompt.format(
                    context=context,
                    keywords=f"[{', '.join(keywords_for_search)}]",
                    forbidden_words=f"[{', '.join(forbidden_words)}]",
                    graph_info=kg_info
                )
                print("使用知识图谱信息生成问题...")
            else:
                # Bug fix: the backup template declares only "context" and
                # "keywords"; passing forbidden_words made LangChain's
                # strict formatter raise, so this fallback path always failed.
                prompt = self.backup_prompt.format(
                    context=context,
                    keywords=f"[{', '.join(keywords_for_search)}]"
                )
                print("使用备用上下文生成问题...")

            response = self.client.chat.completions.create(
                model=MODEL_NAME,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.8,  # higher temperature for question variety
            )
            raw_question = response.choices[0].message.content.strip()

            # Strip parenthesized asides — both halfwidth (...) and
            # fullwidth （...） — using non-greedy matches.
            cleaned_question = re.sub(r'\(.*?\)|（.*?）', '', raw_question)

            self.generated_questions.append(cleaned_question)
            return cleaned_question
        except Exception as e:
            print(f"生成问题失败，使用备用问题：{e}")
            if self.backup_questions: return self.backup_questions.pop(0)
            return "无法生成问题，请检查配置和网络连接。"

    def _query_kg_in_background(self, keywords: List[str]):
        """Run a KG query in a background thread and set the ready event
        regardless of outcome."""
        try:
            self.kg_processor.query_graph(keywords)
        except Exception as e:
            print(f"知识图谱查询失败: {e}")
        finally:
            self.kg_ready_event.set()


class ScoringModule:
    """Scores free-text answers with an LLM, using retrieved chapter
    chunks as the reference material."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor, kg_processor: KnowledgeGraphProcessor):
        self.kb_processor = knowledge_processor
        self.kg_processor = kg_processor  # kept for interface symmetry; not used in scoring
        self.client = OpenAI(
            api_key=API_KEY,
            base_url=BASE_URL,
        )

    def score_answer(self, question: str, answer: str, max_score: int) -> Tuple[int, str]:
        """Score ``answer`` against ``question`` out of ``max_score``.

        Returns:
            (score, feedback). Score is clamped to [0, max_score]; on any
            failure (API error, unparsable response) it is 0 with an
            explanatory message.
        """
        if not answer.strip():
            return 0, "答案为空。"

        try:
            # Retrieve chapter chunks related to the question as reference.
            relevant_docs = self.kb_processor.retrieve_relevant_docs(question, top_k=5)
            context = " ".join([doc.page_content for doc in relevant_docs])

            # Scoring prompt with a fixed output format so we can parse it.
            prompt = f"""
你是一名资深的大数据领域专家，请根据提供的“参考资料”，对学生对“问题”的“答案”进行客观、公正的评分，并给出详细的反馈。

评分标准：
- 满分{max_score}分。
- 答案的正确性、完整性、深度和逻辑性是主要的评分依据。
- 答案应包含“参考资料”中与问题相关的核心知识点。
- 答案如果能结合实际应用或进行拓展思考，可获得额外加分。
- 如果答案与问题无关或过于简略，应酌情扣分。
- 评分时请严格按照满分{max_score}分进行打分。

问题：{question}
参考资料：{context}
学生答案：{answer}

请按照以下格式输出：
**分数**：[在此处填写分数]
**反馈**：[在此处填写详细反馈]
"""
            response = self.client.chat.completions.create(
                model=MODEL_NAME,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.1,  # low temperature for consistent grading
            )
            llm_response = response.choices[0].message.content.strip()

            # Parse score and feedback from the formatted response.
            score_match = re.search(r'分数.*?：\s*(\d+)', llm_response)
            feedback_match = re.search(r'反馈.*?：\s*(.*)', llm_response, re.DOTALL)

            score = int(score_match.group(1)) if score_match else 0
            # Clamp: despite the prompt, the LLM occasionally reports a
            # value outside [0, max_score].
            score = max(0, min(score, max_score))
            feedback = feedback_match.group(1).strip() if feedback_match else "未能生成有效的评分反馈。"

            return score, feedback

        except Exception as e:
            print(f"评分失败：{e}")
            return 0, f"评分系统发生错误，请联系管理员。错误信息：{str(e)}"


# --- 辅助函数 ---

def get_multiline_input(prompt_message: str) -> str:
    """Read lines from stdin until the user submits an empty line.

    The prompt is printed without a trailing newline; the terminating
    empty line is not included in the result.
    """
    print(prompt_message, end='')
    # iter(input, '') keeps calling input() until it returns ''.
    collected = list(iter(input, ''))
    return '\n'.join(collected)


def check_abnormal_answer(answer: str, question: str) -> Tuple[bool, str]:
    """Heuristic screen for abnormal answers.

    Flags answers that are too long (likely pasted), too short, or that
    share no meaningful token with the question.

    Returns:
        (is_abnormal, reason) — reason is "" when the answer looks normal.
    """
    if len(answer) > 300:
        return True, "答案过长，可能存在复制粘贴行为。"
    if len(answer) < 10:
        return True, "答案过短，可能为无效回答。"
    # Bug fix: iterating raw jieba.cut(question) output included punctuation
    # and single-character particles ("的", "，", ...) that appear in almost
    # any Chinese text, making this relevance check vacuous. Only tokens of
    # length >= 2 are treated as key terms, and the check is skipped when
    # the question yields none.
    key_tokens = [w for w in jieba.cut(question) if len(w.strip()) >= 2]
    if key_tokens and not any(w in answer for w in key_tokens):
        return True, "答案中未包含与问题相关的关键词。"
    return False, ""


# --- 主程序入口 ---

def _select_chapter() -> int:
    """Show the chapter menu and return a validated 1-based chapter number."""
    print("\n--- 可测试章节 ---")
    chapter_map = {i + 1: file_path for i, file_path in enumerate(KNOWLEDGE_FILES)}
    for num, path in chapter_map.items():
        chapter_name = os.path.basename(path).replace(".txt", "").replace(".docx", "")
        print(f"  - 第{num}章: {chapter_name}")

    while True:
        try:
            chapter_choice = int(input("\n请选择要测试的章节号（例如: 1）："))
            if chapter_choice not in chapter_map:
                print("无效的章节号，请重新输入。")
                continue
            return chapter_choice
        except ValueError:
            print("输入无效，请输入一个数字。")


def main():
    """Interactive entry point: pick a chapter, prepare the knowledge base
    and knowledge graph, then run a short open-question exam with LLM
    scoring."""
    print("===== 大数据基础知识章节测试系统启动 =====")

    # 1. Chapter selection and chapter-specific cache paths.
    chapter_choice = _select_chapter()
    selected_file = KNOWLEDGE_FILES[chapter_choice - 1]
    vector_store_path = os.path.join(CACHE_DIR, f"vector_store_ch{chapter_choice}")
    kg_cache_path = os.path.join(CACHE_DIR, f"knowledge_graph_cache_ch{chapter_choice}.json")

    # 2. Wire up the processing modules.
    kg_ready_event = threading.Event()
    kb_processor = KnowledgeBaseProcessor(knowledge_files=[selected_file], vector_store_path=vector_store_path)
    kg_processor = KnowledgeGraphProcessor(kg_cache_path=kg_cache_path)
    question_generator = QuestionGenerator(kb_processor, kg_processor, kg_ready_event)
    scoring_module = ScoringModule(kb_processor, kg_processor)

    per_question_score = TOTAL_SCORE // TOTAL_QUESTIONS  # computed once, not per print

    # Bug fix: the Neo4j driver previously leaked whenever preparation or
    # the exam loop raised (close() only ran on the happy path).
    try:
        # 3. Data preparation: documents, vector store, knowledge graph.
        print("\n--- 知识库准备中 ---")
        start_time = time.time()
        try:
            kb_processor.load_documents()

            # Build the KG in a background thread unless a cache exists.
            if not kg_processor.load_graph_from_cache():
                kg_thread = threading.Thread(target=kg_processor.populate_graph,
                                             args=(kb_processor.documents, kg_ready_event))
                kg_thread.start()

            kb_processor.create_vector_store()
            kb_processor.extract_keywords()
        except Exception as e:
            print(f"知识库准备失败：{e}")
            return

        print(f"知识库准备完成！耗时：{time.time() - start_time:.2f}秒\n")

        # 4. Difficulty selection (unknown input falls back to medium).
        difficulty_map = {
            "简单": "easy",
            "中等": "medium",
            "困难": "hard"
        }
        user_input_difficulty = input("请选择考试难度 (简单/中等/困难): ").strip()
        difficulty = difficulty_map.get(user_input_difficulty, "medium")

        total_score = 0
        exam_answers = []

        print("\n===== 考试开始 =====")
        print(f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分。")

        # 5. Question / answer / scoring loop.
        for i in range(TOTAL_QUESTIONS):
            print(f"\n--- 第{i + 1}题 ---")
            question = question_generator.generate_question(difficulty=difficulty)
            print(f"**问题**：{question}\n")

            user_answer = get_multiline_input("请输入你的答案（输入空行结束）：")

            is_abnormal, abnormal_reason = check_abnormal_answer(user_answer, question)

            if is_abnormal:
                score = 0
                feedback = f"检测到非正常答题行为，本题按0分处理。**具体原因**：{abnormal_reason}"
            else:
                print("\n正在评分，请稍候...")
                score, feedback = scoring_module.score_answer(question, user_answer, per_question_score)

            total_score += score
            exam_answers.append({
                "question": question,
                "answer": user_answer,
                "score": score,
                "feedback": feedback
            })

            print(f"\n**得分**：{score}/{per_question_score}分")
            print(f"**反馈**：\n{feedback}")

        # 6. Final summary and per-question details.
        print("\n\n===== 考试结束 =====")
        print(f"您的总得分：{total_score} / {TOTAL_SCORE}分")

        print("\n--- 考试详情 ---")
        for i, item in enumerate(exam_answers):
            print(f"\n第{i + 1}题：")
            print(f"问题：{item['question']}")
            print(f"你的答案：\n{item['answer']}")
            print(f"得分：{item['score']}/{per_question_score}分")
            print(f"反馈：\n{item['feedback']}")
    finally:
        kg_processor.close()

    print("\n考试系统已退出。")


# Script entry point.
if __name__ == "__main__":
    main()