import os
import torch
import jieba
import numpy as np
from langchain.memory import ConversationBufferMemory
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.document_loaders import Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpenAI
from typing import List, Dict, Tuple, Optional
import time
import networkx as nx
from transformers import AutoTokenizer, AutoModel
from langchain.embeddings.base import Embeddings
from langchain_community.chat_models import ChatOpenAI

# --- Configuration constants ---
KNOWLEDGE_FILE = "D:\\桌面\\第3章 数据获取.docx"  # knowledge-base source document (.docx only)
VECTOR_STORE_PATH = "./vector_store"  # on-disk cache for the FAISS index
CACHE_DIR = "./cache"  # general-purpose cache directory
SIMILARITY_THRESHOLD = 0.7  # cosine-similarity cutoff for rejecting near-duplicate questions
MAX_GENERATION_ATTEMPTS = 5  # generation retries before falling back to the backup question pool
TOTAL_QUESTIONS = 5  # number of questions per exam
TOTAL_SCORE = 50  # exam total; each question is worth TOTAL_SCORE // TOTAL_QUESTIONS
LOCAL_EMBEDDING_MODEL_PATH = r"D:\local_models\paraphrase-multilingual-MiniLM-L12-v2"

# Ensure cache directories exist (no-op when already present)
os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(VECTOR_STORE_PATH, exist_ok=True)


# Custom embedding adapter implementing langchain's Embeddings interface.
class CustomEmbeddings(Embeddings):
    """Sentence embeddings via mean pooling over a HuggingFace encoder's
    last hidden state."""

    def __init__(self, tokenizer, model):
        self.tokenizer = tokenizer
        self.model = model
        self.model.eval()  # inference mode (disables dropout etc.)

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string."""
        return self._embed_text(text)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each document independently."""
        vectors = []
        for doc in texts:
            vectors.append(self._embed_text(doc))
        return vectors

    def _embed_text(self, text: str) -> List[float]:
        """Tokenize, run the encoder, and mean-pool tokens into one vector."""
        encoded = self.tokenizer(
            text, return_tensors="pt", padding=True, truncation=True, max_length=512
        )
        with torch.no_grad():
            output = self.model(**encoded)
        # Mean over the token dimension yields the sentence representation.
        pooled = output.last_hidden_state.mean(dim=1).squeeze()
        return pooled.numpy().tolist()


# 1. Knowledge-base processing module
class KnowledgeBaseProcessor:
    """Wraps the .docx knowledge base: loading, chunking, FAISS vector
    storage, TF-IDF keyword extraction, and similarity retrieval.

    Raises:
        FileNotFoundError: if the embedding model directory does not exist.
    """

    def __init__(self, knowledge_file: str = KNOWLEDGE_FILE,
                 embedding_model_path: str = LOCAL_EMBEDDING_MODEL_PATH):
        self.knowledge_file = knowledge_file
        self.vector_store = None   # FAISS index, built lazily
        self.documents = None      # raw langchain Documents from the .docx
        self.keywords = None       # {"core": [...], "extended": [...]}
        self.embedding_model_path = embedding_model_path

        # TF-IDF with jieba tokenization for Chinese text. token_pattern=None
        # silences sklearn's warning that the default token pattern is
        # ignored whenever a custom tokenizer is supplied (no behavior change).
        self.tfidf_vectorizer = TfidfVectorizer(
            tokenizer=self._jieba_tokenize, lowercase=False, token_pattern=None
        )

        # FIX: actually validate the model path before announcing success.
        # The original printed "验证通过" unconditionally and then crashed
        # inside os.listdir with an opaque error when the path was missing.
        if not os.path.isdir(embedding_model_path):
            raise FileNotFoundError(f"嵌入模型路径不存在: {embedding_model_path}")
        print(f"模型路径验证通过: {embedding_model_path}")
        print(f"目录内容: {os.listdir(embedding_model_path)[:5]}...")

        try:
            # Make sure the vocabulary file contains the special tokens the
            # tokenizer needs ([UNK] in particular).
            vocab_file = os.path.join(embedding_model_path, "vocab.txt")
            if not os.path.exists(vocab_file):
                print("警告: vocab.txt 文件不存在，创建完整词汇表")
                self._create_full_vocab(vocab_file)
            else:
                self._verify_vocab_contains_unk(vocab_file)

            # Load the local model; the UNK token is pinned explicitly.
            self.tokenizer = AutoTokenizer.from_pretrained(
                embedding_model_path,
                unk_token="[UNK]"
            )
            self.model = AutoModel.from_pretrained(embedding_model_path)

            # Embedding adapter shared by the vector store and similarity checks.
            self.embeddings_model = CustomEmbeddings(self.tokenizer, self.model)

            print(f"成功加载嵌入模型: {self.embedding_model_path}")

            # Smoke-test the embedding pipeline end to end.
            test_embedding = self.embeddings_model.embed_query("这是一个测试文本。")
            print(f"测试嵌入成功，向量维度：{len(test_embedding)}")
        except Exception as e:
            print(f"加载嵌入模型失败: {e}")
            raise

    def _create_full_vocab(self, vocab_path: str):
        """Write a minimal vocabulary file containing all required tokens."""
        required_tokens = [
            "[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]",
            "，", "。", "的", "了", "是", "在", "和", "有", "就", "不", "人",
            "我", "他", "她", "它", "这", "那", "你", "们", "说", "要", "去",
            "上", "下", "中", "为", "个", "也", "都", "而", "并", "且", "或",
            "如果", "因为", "所以", "但是", "然后", "虽然", "即使", "可以", "可能",
            "应该", "一定", "必须", "需要", "要求", "问题", "答案", "知识", "数据",
            "获取", "处理", "分析", "方法", "技术", "系统", "信息", "网络", "文件"
        ]

        with open(vocab_path, "w", encoding="utf-8") as f:
            for token in required_tokens:
                f.write(f"{token}\n")
        print(f"已创建完整词汇表: {vocab_path}（包含{len(required_tokens)}个标记）")

        # Sanity check: [UNK] must be present after writing.
        self._verify_vocab_contains_unk(vocab_path)

    def _verify_vocab_contains_unk(self, vocab_path: str):
        """Ensure the vocabulary contains [UNK], prepending it if absent."""
        with open(vocab_path, "r", encoding="utf-8") as f:
            vocab_lines = f.readlines()

        if "[UNK]" not in [line.strip() for line in vocab_lines]:
            print("警告: 词汇表缺少 [UNK] 标记，正在修复...")
            # Prepend [UNK] so the tokenizer can always resolve unknown tokens.
            with open(vocab_path, "r+", encoding="utf-8") as f:
                content = f.read()
                f.seek(0, 0)
                f.write("[UNK]\n" + content)
            print("已修复词汇表，添加了 [UNK] 标记")

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Chinese word segmentation via jieba."""
        return list(jieba.cut(text))

    def _embed_query(self, text: str) -> List[float]:
        """Embed a single text (shared helper used by other modules)."""
        return self.embeddings_model.embed_query(text)

    def load_documents(self) -> None:
        """Load the .docx knowledge file into langchain Documents.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the file is not a .docx.
        """
        if not os.path.exists(self.knowledge_file):
            raise FileNotFoundError(f"文件不存在：{self.knowledge_file}")

        if not self.knowledge_file.lower().endswith(".docx"):
            raise ValueError(f"文件格式错误：{self.knowledge_file}")

        loader = Docx2txtLoader(self.knowledge_file)
        self.documents = loader.load()
        print(f"成功加载docx文件：{self.knowledge_file}（共{len(self.documents)}个文档对象）")

    def split_documents(self) -> List:
        """Split loaded documents into overlapping ~500-character chunks."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Load the cached FAISS store if present, otherwise build a new one."""
        if os.path.exists(VECTOR_STORE_PATH) and os.listdir(VECTOR_STORE_PATH):
            try:
                # allow_dangerous_deserialization is required by FAISS.load_local
                # for pickle-backed indexes; safe here because we wrote the cache.
                self.vector_store = FAISS.load_local(
                    VECTOR_STORE_PATH,
                    self.embeddings_model,
                    allow_dangerous_deserialization=True
                )
                print("已加载缓存的向量存储")
            except Exception as e:
                print(f"加载缓存向量存储失败: {e}，将重新创建。")
                self._create_new_vector_store()
        else:
            self._create_new_vector_store()

    def _create_new_vector_store(self):
        """Build the FAISS store from document chunks and cache it to disk."""
        if self.documents is None:
            self.load_documents()  # lazy-load if the caller skipped it
        splits = self.split_documents()

        self.vector_store = FAISS.from_documents(splits, self.embeddings_model)
        self.vector_store.save_local(VECTOR_STORE_PATH)
        print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Rank document terms by aggregate TF-IDF weight.

        Populates self.keywords with the top 20 terms as "core" and the
        next 80 as "extended".
        """
        if self.documents is None:
            self.load_documents()

        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]

        # Guard against an empty corpus (fit_transform would raise).
        if not texts:
            print("没有文本内容可用于关键词提取。")
            self.keywords = {"core": [], "extended": []}
            return

        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)

        feature_names = self.tfidf_vectorizer.get_feature_names_out()
        keywords_with_weights = {}

        # Sum each term's TF-IDF weight across all documents.
        for i in range(len(texts)):
            doc_weights = tfidf_matrix[i].toarray()[0]
            for idx, weight in enumerate(doc_weights):
                if weight > 0:
                    keyword = feature_names[idx]
                    keywords_with_weights[keyword] = keywords_with_weights.get(keyword, 0) + weight

        sorted_keywords = sorted(keywords_with_weights.items(), key=lambda x: x[1], reverse=True)
        self.keywords = {
            "core": [kw for kw, _ in sorted_keywords[:20]],
            "extended": [kw for kw, _ in sorted_keywords[20:100]]
        }
        print(f"提取关键词：核心{len(self.keywords['core'])}个，扩展{len(self.keywords['extended'])}个")
        if not self.keywords["core"]:
            print("警告：没有提取到核心关键词，这可能会影响问题生成。")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to the query."""
        if self.vector_store is None:
            self.create_vector_store()  # build lazily on first use
        return self.vector_store.similarity_search(query, k=top_k)


# 2. Question generation module
class QuestionGenerator:
    """Generates exam questions grounded in the knowledge base, with a
    backup question pool and embedding-similarity de-duplication."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor,
                 embedding_model_path: str = LOCAL_EMBEDDING_MODEL_PATH):
        self.kb_processor = knowledge_processor
        # SECURITY FIX: prefer the DASHSCOPE_API_KEY environment variable
        # over a key committed to source. The literal fallback is kept only
        # for backward compatibility and should be rotated and removed.
        self.client = ChatOpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY", "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model="deepseek-v3"
        )
        self.generated_questions = []  # accepted questions, in issue order
        self.backup_questions = []     # fallback pool, generated once on demand
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the prompt templates for main and backup question generation."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""作为一个专业的AI考试出题员，请根据以下上下文和关键词生成一个具有考察意义的客观问题。
            关键词：{keywords}
            上下文：{context}
            要求：
            1. 问题内容应紧密围绕上下文和关键词。
            2. 问题应清晰、简洁、不含歧义。
            3. 避免生成过于简单或过于复杂的问题。
            4. 禁止出现"如图"、“所示”、“源程序”、“源代码”、“截图”、“示例程序”等与图片或代码相关的词汇。
            5. 问题末尾不带问号。
            问题：
            """
        )

        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题：
            关键词：{keywords}
            上下文：{context}
            要求：每个问题包含至少一个关键词，不要编号，每行一个问题，禁止出现"如图"“所示”等图片相关词汇
            问题："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True if the text references figures/code, which a
        text-only exam cannot display."""
        forbidden = ["如图", "如图所示", "图例", "源程序", "所示", "源代码", "截图", "示例程序"]
        return any(word in text for word in forbidden)

    def calculate_similarity(self, text1: str, text2: str) -> float:
        """Cosine similarity between the embeddings of two texts."""
        emb1 = np.array(self.kb_processor._embed_query(text1))
        emb2 = np.array(self.kb_processor._embed_query(text2))
        return np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))

    def _generate_backup_questions(self):
        """Populate the backup question pool once (no-op if already filled)."""
        if self.backup_questions:
            return

        # Make sure documents are loaded before building context.
        if not self.kb_processor.documents:
            self.kb_processor.load_documents()

        # Use up to the first 5 documents so the model has enough material.
        context_docs = self.kb_processor.documents
        context = "\n\n".join([doc.page_content for doc in context_docs[:min(5, len(context_docs))]])

        # Make sure keywords are available.
        if not self.kb_processor.keywords or not self.kb_processor.keywords["core"]:
            self.kb_processor.extract_keywords()

        keywords_str = ", ".join(self.kb_processor.keywords["core"][:10]) if self.kb_processor.keywords[
            "core"] else ""

        if not context or not keywords_str:
            print("警告：无法生成备用问题，因为上下文或关键词为空。")
            return

        try:
            result = self.get_ai_response(
                prompt=self.backup_prompt.format(context=context, keywords=keywords_str)
            )
            self.backup_questions = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成保底题库：{len(self.backup_questions)}题")
        except Exception as e:
            print(f"生成备用问题失败: {e}")

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Call the chat model, retrying with backoff on failure.

        Raises:
            Exception: when every attempt fails.
        """
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.invoke(prompt)
                return response.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({retry_count}/{max_retries})...")
                time.sleep(2)
        return ""  # unreachable; kept to satisfy the declared return type

    def generate_question(self) -> str:
        """Generate one new, non-duplicate, figure-free question.

        Falls back to the backup pool after MAX_GENERATION_ATTEMPTS failures;
        as a last resort returns an error-message string.
        """
        attempts = 0

        self._generate_backup_questions()  # ensure the fallback pool exists

        while attempts < MAX_GENERATION_ATTEMPTS:
            try:
                all_keywords = self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"]
                if not all_keywords:
                    print("没有可用的关键词来生成问题。")
                    break

                # Sample 1-2 keywords to seed retrieval and generation.
                num_keywords = np.random.randint(1, min(len(all_keywords) + 1, 3))
                keywords = np.random.choice(all_keywords, size=num_keywords, replace=False)
                keyword_str = ", ".join(keywords)

                # Retrieve grounding context for the sampled keywords.
                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

                if not context:
                    print(f"未能为关键词 '{keyword_str}' 找到足够的上下文。")
                    attempts += 1
                    continue

                prompt_content = self.question_prompt.format(
                    context=context,
                    keywords=keyword_str
                )
                question = self.get_ai_response(prompt_content).strip()

                if not question:  # the model returned nothing
                    attempts += 1
                    continue

                if self._check_forbidden_words(question):
                    attempts += 1
                    continue

                # Reject near-duplicates of questions already issued.
                if not any(self.calculate_similarity(question, q) > SIMILARITY_THRESHOLD for q in
                           self.generated_questions):
                    self.generated_questions.append(question)
                    return question

            except Exception as e:
                print(f"题目生成失败（尝试{attempts + 1}/{MAX_GENERATION_ATTEMPTS}）：{str(e)}")

            attempts += 1
            time.sleep(1)

        print("所有尝试失败，正在使用保底题目...")
        # Prefer an unused, forbidden-word-free backup question.
        for q in self.backup_questions:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q

        # Last resort: any backup question (may repeat or contain forbidden
        # words), or a fixed error message.
        if self.backup_questions:
            # FIX: str() normalizes numpy.str_ from np.random.choice so the
            # declared `-> str` contract holds for callers.
            return str(np.random.choice(self.backup_questions))
        else:
            return "无法生成任何问题。请检查知识库和API连接。"


# 3. Exam interaction module
class ExamInteractionModule:
    """Drives the console exam loop: question delivery, answer capture,
    abnormal-answer screening, and the final summary."""

    def __init__(self, question_generator: QuestionGenerator):
        self.question_generator = question_generator
        self.memory = ConversationBufferMemory(memory_key="chat_history")
        self.score = 0
        # Question counter; incremented just before each question is issued.
        self.current_question_index = 0
        self.answers = []

    def start_exam(self) -> None:
        """Print the exam banner and rules."""
        print(f"===== 考试开始 =====")
        print(f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分")
        print(f"每题答完后将自动评分，输入空行结束答题\n")

    def get_next_question(self) -> Optional[str]:
        """Return the next generated question, or None when the exam is done."""
        if self.current_question_index >= TOTAL_QUESTIONS:
            return None
        self.current_question_index += 1
        return self.question_generator.generate_question()

    def collect_answer(self, question: str) -> str:
        """Read a multi-line answer from stdin, terminated by an empty line."""
        print(f"\n第{self.current_question_index}题：{question}")
        print("请输入答案（输入空行结束）：")

        collected = []
        while line := input():
            collected.append(line)

        user_answer = "\n".join(collected)
        # Record a placeholder entry; score/feedback are patched in later.
        self.answers.append({
            "question": question,
            "answer": user_answer,
            "score": 0,
            "feedback": ""
        })
        return user_answer

    def check_abnormal_answer(self, answer: str) -> bool:
        """Heuristically flag empty, refusal, copied, or trivially short answers."""
        abnormal_patterns = [
            "不会", "没学", "不知道", "不懂", "略",
            "乱答", "随便写", "同上", "无", "null"
        ]
        normalized = answer.lower().strip()

        # An empty answer is abnormal by definition.
        if not normalized:
            return True

        # Refusal / filler phrases.
        if any(pattern in normalized for pattern in abnormal_patterns):
            return True

        # Echoing the question back (in either direction) counts as copying.
        last_question = self.answers[-1]["question"].lower().strip() if self.answers else ""
        if last_question and (normalized in last_question or last_question in normalized):
            return True

        # Too short to carry any substance.
        return len(normalized) < 5 and len(normalized.split()) <= 2

    def finish_exam(self) -> None:
        """Print the total score and a per-question breakdown."""
        print(f"\n===== 考试结束 =====")
        print(f"您的总得分：{self.score}/{TOTAL_SCORE}")
        print("\n===== 答题详情 =====")
        for idx, entry in enumerate(self.answers, 1):
            print(f"\n第{idx}题：{entry['question']}")
            print(f"您的答案：{entry['answer']}")
            print(f"得分：{entry['score']}")
            print(f"反馈：{entry['feedback']}")


# 4. Scoring & feedback module
class ScoringFeedbackModule:
    """Scores user answers against retrieved knowledge using the chat model."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor,
                 embedding_model_path: str = LOCAL_EMBEDDING_MODEL_PATH):
        self.kb_processor = knowledge_processor
        # SECURITY FIX: prefer the DASHSCOPE_API_KEY environment variable
        # over a key committed to source. The literal fallback is kept only
        # for backward compatibility and should be rotated and removed.
        self.client = ChatOpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY", "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model="deepseek-v3",
            temperature=0.3  # low temperature for consistent grading
        )
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the scoring prompt template."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points", "similarity_score"],
            template="""作为一个专业的AI考试评分员，请严格按照以下要求对用户答案进行评分和反馈。
            - **问题:** {question}
            - **用户答案:** {user_answer}
            - **标准知识参考 (可能包含正确答案):** {context}
            - **相关关键词:** {keywords}
            - **本题总分:** {total_points}

            请综合考虑用户答案与标准知识的相关性、准确性、完整性以及关键词覆盖度进行评分。
            - **评分标准:**
                - 答案准确、完整、流畅，覆盖核心概念：得高分。
                - 答案部分准确，或有遗漏，或表达不够清晰：得中等分。
                - 答案错误，或与问题不相关，或过于简略：得低分或0分。
            - **语义相似度参考:** 您的答案与标准知识的语义相似度为 {similarity_score:.2f} (越高越好)。

            请直接输出得分（仅数字，不带单位，例如：5），然后另起一行给出详细的反馈意见。
            请避免在反馈中直接出现"语义相似度"或具体的相似度分数。

            例如：
            8
            您的答案准确地解释了...，内容完整。

            或：
            3
            您的答案提到了部分关键信息，但对...的解释不够深入。

            得分：
            """
        )

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Call the chat model for scoring, retrying with backoff on failure.

        Raises:
            Exception: when every attempt fails.
        """
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.invoke(prompt)
                return response.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({retry_count}/{max_retries})...")
                time.sleep(2)
        return ""  # unreachable; kept to satisfy the declared return type

    @staticmethod
    def _parse_score(line: str) -> Optional[int]:
        """Extract a score from the model's first output line.

        Accepts a bare number ("8") as well as prefixed/suffixed forms the
        model sometimes emits ("得分：8", "8分"). Returns None when the line
        contains no digits.
        """
        stripped = line.strip()
        try:
            return int(stripped)
        except ValueError:
            pass
        # Fall back to the first contiguous run of digits anywhere in the line.
        digits = ""
        for ch in stripped:
            if ch.isdigit():
                digits += ch
            elif digits:
                break
        return int(digits) if digits else None

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Score an answer; returns (score clamped to [0, total_points], feedback)."""
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

        # Without reference knowledge there is nothing to grade against.
        if not context:
            return 0, "未找到相关标准知识进行评分，请检查知识库或问题相关性。"

        # Semantic similarity between the answer and the retrieved context,
        # passed to the model as a grading hint.
        similarity_score = self._calculate_semantic_similarity(user_answer, context)

        # Make sure keywords are available.
        if not self.kb_processor.keywords or not self.kb_processor.keywords["core"]:
            self.kb_processor.extract_keywords()

        keywords = ", ".join(self.kb_processor.keywords["core"][:10]) if self.kb_processor.keywords["core"] else ""

        prompt_content = self.scoring_prompt.format(
            question=question,
            user_answer=user_answer,
            context=context,
            keywords=keywords,
            total_points=total_points,
            similarity_score=similarity_score
        )

        try:
            result = self.get_ai_response(prompt_content)
            score_lines = result.split("\n", 1)  # split off the score line only

            # FIX: a bare int() rejected common outputs such as "得分：8" or
            # "8分"; _parse_score tolerates those formats.
            score = self._parse_score(score_lines[0])
            if score is None:
                print(f"警告：无法从 '{score_lines[0]}' 解析出分数，默认为0分。")
                score = 0

            feedback = score_lines[1].strip() if len(score_lines) > 1 else "没有提供详细反馈。"

            return max(0, min(total_points, score)), feedback
        except Exception as e:
            print(f"评分请求或解析失败：{str(e)}")
            return 0, "评分过程出错，请联系管理员。"

    def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
        """Cosine similarity between the embeddings of two texts."""
        emb1 = np.array(self.kb_processor._embed_query(text1))
        emb2 = np.array(self.kb_processor._embed_query(text2))
        return np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))


# 5. Knowledge-graph validation module (consider removing if unused)
class KnowledgeGraphValidator:
    """Stub for validating answer logic against a knowledge graph.

    NOTE: _build_graph is empty, so no actual validation happens yet.
    """

    def __init__(self, documents):
        self.graph = nx.DiGraph()
        # _build_graph stays disabled while it is unimplemented.
        # self._build_graph(documents)
        print("警告：知识图谱验证模块未完全实现，当前不执行任何验证逻辑。")

    def _build_graph(self, documents):
        """Populate self.graph from documents (not implemented)."""
        pass

    def validate_logic(self, user_answer: str) -> dict:
        """Return a permissive default result until the graph is built."""
        return {"logical_consistency": True, "entity_coverage": 0}

    def _extract_entities(self, text):
        """Entity-extraction placeholder (not implemented)."""
        return []


# Main program
def main():
    """Wire the modules together and run one full console exam session."""
    try:
        kb_processor = KnowledgeBaseProcessor()
        kb_processor.load_documents()
        kb_processor.create_vector_store()
        kb_processor.extract_keywords()

        question_generator = QuestionGenerator(kb_processor)
        exam = ExamInteractionModule(question_generator)
        scorer = ScoringFeedbackModule(kb_processor)

        exam.start_exam()

        points_per_question = TOTAL_SCORE // TOTAL_QUESTIONS
        for _ in range(TOTAL_QUESTIONS):
            question = exam.get_next_question()
            # Bail out early when question generation has been exhausted.
            if not question or "无法生成任何问题" in question:
                print("\n无法生成更多问题，考试提前结束。")
                break

            user_answer = exam.collect_answer(question)

            # Abnormal answers (refusals, copies, blanks) score 0 immediately.
            if exam.check_abnormal_answer(user_answer):
                exam.answers[-1]["score"] = 0
                exam.answers[-1]["feedback"] = "检测到非正常答题行为，本题按0分处理"
                print("得分：0分")
                print("反馈：检测到非正常答题行为，本题按0分处理")
                continue

            score, feedback = scorer.score_answer(question, user_answer, points_per_question)

            exam.score += score
            # collect_answer already appended the record; patch it in place.
            if exam.answers:
                exam.answers[-1]["score"] = score
                exam.answers[-1]["feedback"] = feedback

            print(f"得分：{score}/{points_per_question}分")
            print(f"反馈：{feedback}")

        exam.finish_exam()

    except Exception as e:
        print(f"\n程序出错：{str(e)}")
        import traceback
        traceback.print_exc()  # full stack trace for easier diagnosis
        print("请根据提示解决问题后重新运行程序")


# Script entry point: run the exam only when executed directly, not on import.
if __name__ == "__main__":
    main()