import os
import chardet
import time
import docx
import re
import numpy as np
import random
import jieba  # 引入jieba分词库
import jieba.analyse  # 引入关键词提取功能
from typing import List, Dict, Tuple, Any
from openai import OpenAI
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


class SmartExamBot:
    def __init__(self, api_key: str, base_url: str, max_questions: int = 5):
        """Initialize the exam bot.

        Args:
            api_key: API key for the OpenAI-compatible endpoint.
            base_url: Base URL of the chat-completions service.
            max_questions: Number of questions asked in one exam session.

        Note: the original body assigned available_angles, vectorization_cache,
        core_keywords and extended_keywords twice; the duplicate block was removed.
        """
        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.max_questions = max_questions
        self.question_count = 0  # questions asked so far
        self.total_score = 0  # accumulated score across all questions
        self.max_possible_score = 0  # grows by 10 per question asked
        self.question_scores = []  # per-question final scores
        self.knowledge_content = ""  # raw knowledge-base text
        self.knowledge_passages = []  # chunked passages for retrieval
        self.knowledge_vectors = None  # TF-IDF matrix of the passages
        self.vectorizer = TfidfVectorizer()
        self.generated_questions = []  # history used for similarity checks
        self.current_question = ""
        self.question_dimensions = []  # analysis dimensions parsed from the current question
        self.keywords = []  # keywords extracted from the knowledge base
        # Questions referring to images/source listings cannot be answered in text mode.
        self.image_patterns = re.compile(r'图片|图示|源程序|代码段|截图|示例程序')
        self.cannot_answer_patterns = re.compile(
            r'不会|不懂|不知道|不清楚|不了解|未学|没学|跳过|下一题|不知道|无法回答|答不了|放弃|不答|不想答|不想回答')
        self.irrelevant_patterns = re.compile(
            r'无关|不相关|不知道|不会|不懂|没学|未学|不清楚|不了解|跳过|下一题|不知道|无法回答|答不了|放弃|不答|不想答|不想回答')
        self.unrelated_topics = ['政治', '经济', '文化', '体育', '娱乐', '游戏', '生活', '旅游', '美食', '历史', '地理',
                                 '音乐', '电影', '电视剧', '综艺']
        self.question_similarity_threshold = 0.6  # cosine-similarity cutoff for duplicate questions
        self.used_keywords = []  # keywords already used in generated questions
        self.used_angles = set()  # analysis angles already used
        self.backup_questions = [  # fallback question bank when generation keeps failing
            "在大规模分布式爬虫场景中，如何通过with...as...语句解决文件句柄耗尽问题？请从资源管理和系统稳定性两个维度说明",
            "对比try-finally语句与with...as...语句在数据安全方面的优劣，从异常处理和资源释放两个维度分析",
            "在高频率动态页面爬取场景中，如何通过自动关闭机制保障数据完整性？请从技术实现和数据可靠性两个维度说明",
            "对比手动关闭文件与自动关闭机制在开发效率方面的差异，从代码量和出错率两个维度分析",
            "在需要断点续爬的场景中，如何通过with...as...语句确保临时文件的正确处理？请从程序健壮性和用户体验两个维度说明",
            "对比单线程爬虫与多线程爬虫中文件句柄管理的差异，从资源竞争和稳定性两个维度分析",
            "在爬取过程突发断电的场景中，自动关闭机制如何保护未完成数据？请从数据恢复和系统保护两个维度说明",
            "对比直接文件操作与with语句在爬虫长期运行中的表现，从内存占用和句柄泄漏两个维度分析",
            "在需要爬取海量小文件的场景中，如何通过with...as...提升处理效率？请从IO优化和代码简洁性两个维度说明",
            "对比不同编程语言中文件自动管理机制的实现，从语法设计和用户体验两个维度分析"
        ]
        self.used_backup_questions = set()  # indices of backup questions already served

        # Pool of analysis angles that question generation rotates through.
        self.available_angles = ["技术实现", "用户体验", "资源管理", "开发效率", "数据完整性",
                                 "系统稳定性", "数据安全", "代码简洁性", "异常处理", "资源释放"]
        self.vectorization_cache = {}  # hash(knowledge) -> (passages, vectors)

        # Core keyword pool generated questions must draw from.
        self.core_keywords = {"文件句柄", "资源泄漏", "with...as...", "自动关闭", "数据安全",
                              "开发效率", "爬虫稳定性", "资源管理", "句柄释放"}
        self.extended_keywords = set()  # non-core keywords mined lazily from the knowledge base

    def _chunk_knowledge(self, content: str, chunk_size: int = 500) -> List[str]:
        """Split *content* into passages of at most *chunk_size* whitespace-separated tokens."""
        tokens = content.split()
        return [
            " ".join(tokens[start:start + chunk_size])
            for start in range(0, len(tokens), chunk_size)
        ]

    def _vectorize_knowledge(self):
        """Build (or fetch from cache) the TF-IDF index over the knowledge passages."""
        cache_key = hash(self.knowledge_content)
        cached = self.vectorization_cache.get(cache_key)
        if cached is not None:
            self.knowledge_passages, self.knowledge_vectors = cached
            return

        # Reuse pre-set passages if present, otherwise chunk the raw content.
        passages = self.knowledge_passages or self._chunk_knowledge(self.knowledge_content)
        self.knowledge_passages = passages
        self.vectorizer.fit(passages)
        self.knowledge_vectors = self.vectorizer.transform(passages)

        # Remember the result so repeated calls on the same content are free.
        self.vectorization_cache[cache_key] = (passages, self.knowledge_vectors)

    def retrieve_relevant_passages(self, query: str, top_k: int = 3) -> List[str]:
        """Return the *top_k* knowledge passages most similar to *query* (TF-IDF cosine)."""
        if self.knowledge_vectors is None:
            self._vectorize_knowledge()
        sims = cosine_similarity(self.vectorizer.transform([query]), self.knowledge_vectors)[0]
        best = np.argsort(sims)[-top_k:][::-1]  # indices of the k highest scores, descending
        return [self.knowledge_passages[idx] for idx in best]

    def _is_question_too_similar(self, new_question: str) -> bool:
        """Return True when *new_question* is too close to a previously generated one.

        Two signals are checked against every historical question:
        - keyword overlap ratio (intersection over union) above 0.7, or
        - TF-IDF cosine similarity above self.question_similarity_threshold.

        Fix: the new question's TF-IDF vector and keyword set were recomputed
        inside the loop for every historical question; they are loop-invariant
        and are now computed once.
        """
        if not self.generated_questions:
            return False

        new_keywords = set(self._extract_keywords(new_question))
        new_vec = self.vectorizer.transform([new_question])

        for old_question in self.generated_questions:
            old_keywords = set(self._extract_keywords(old_question))
            union = new_keywords | old_keywords
            keyword_overlap = len(new_keywords & old_keywords) / len(union) if union else 0

            cosine_sim = cosine_similarity(new_vec, self.vectorizer.transform([old_question]))[0][0]

            # Either signal alone is enough to flag a duplicate.
            if cosine_sim > self.question_similarity_threshold or keyword_overlap > 0.7:
                return True
        return False

    def _extract_keywords(self, text: str) -> List[str]:
        """Extract up to 10 salient keywords from *text* via TextRank plus the core pool."""
        # TextRank keyword extraction (allowPOS deliberately not used).
        candidates = jieba.analyse.textrank(
            text,
            topK=15,
            withWeight=False
        )

        # Drop stop words and single characters.
        stop_words = {'可以', '一个', '这个', '我们', '进行', '需要', '应该', '问题', '方法', '的', '是', '在', '了',
                      '和', '有', '对', '为', '等', '就', '而', '与', '中', '到'}
        picked = [word for word in candidates if word not in stop_words and len(word) > 1]

        # Force-include any core keyword that literally appears in the text.
        for core_kw in self.core_keywords:
            if core_kw in text and core_kw not in picked:
                picked.append(core_kw)

        # De-duplicate while preserving first-seen order.
        deduped = list(dict.fromkeys(picked))

        return deduped[:10] if deduped else ["文件句柄", "资源泄漏", "数据爬取"]

    def _get_unused_angles(self, num: int = 2) -> List[str]:
        """Pick *num* analysis angles, preferring ones not used yet.

        Fix: when fewer than *num* unused angles remained, the old code sampled
        from the FULL pool, so it could repeat just-used angles while skipping
        the remaining fresh ones, and raised ValueError when num exceeded the
        pool size. Now every remaining unused angle is taken first and the rest
        are drawn from already-used ones.
        """
        unused = [angle for angle in self.available_angles if angle not in self.used_angles]

        if len(unused) >= num:
            return random.sample(unused, num)

        # Take all remaining unused angles, then top up from the used ones.
        used_pool = [angle for angle in self.available_angles if angle in self.used_angles]
        topup = random.sample(used_pool, min(num - len(unused), len(used_pool)))
        selected = unused + topup

        # Free up capacity so future calls regain variety.
        # NOTE(review): set iteration order is arbitrary, so "oldest" here is
        # best-effort only — confirm whether true recency tracking is needed.
        if len(self.used_angles) > 5:
            stale = list(self.used_angles)[:2]
            self.used_angles = self.used_angles - set(stale)
        return selected

    def _extract_dimensions(self, question: str) -> List[str]:
        """Parse the analysis dimensions a question asks for (e.g. 请从A和B两个维度分析).

        Fix: the old pattern `请从(.*?)分析` missed questions ending in 说明/论述
        (half the backup bank ends with 说明), and let the filler "两个维度" leak
        into the last extracted dimension. The filler is now consumed by an
        optional non-capturing group before the closing verb.
        """
        match = re.search(r'请从(.*?)(?:两个维度|的角度)?(?:分析|说明|论述)', question)
        if not match:
            return []
        dims_text = match.group(1)
        return [dim.strip() for dim in re.split(r'、|和|与', dims_text) if dim.strip()]

    def _is_duplicating_question(self, question: str, answer: str) -> bool:
        """Heuristically detect an answer that merely restates the question.

        After stripping punctuation and question boilerplate, the answer is
        flagged when it is much shorter than the question, agrees with it
        character-by-character too often, reuses most of its words, or one
        text contains the other.
        """
        if not question or not answer:
            return False

        def strip_boilerplate(text):
            text = re.sub(r'[^\w\s]', '', text)
            text = re.sub(r'请从|分析|简述|说明|论述|谈谈|如何|什么|为什么|是什么|有哪些', '', text,
                          flags=re.IGNORECASE)
            return text.strip()

        q_text = strip_boilerplate(question)
        a_text = strip_boilerplate(answer)

        # Far shorter than the question -> almost certainly a truncated copy.
        if len(a_text) < len(q_text) * 0.5:
            return True

        # Position-wise character agreement above 30% -> near-verbatim copy.
        overlap_len = min(len(q_text), len(a_text))
        same_pos = sum(1 for qc, ac in zip(q_text, a_text) if qc == ac)
        if overlap_len > 0 and same_pos / overlap_len > 0.3:
            return True

        # Word-level reuse: more than 70% of the question's words reappear.
        word_re = r'\b[\u4e00-\u9fa5a-zA-Z]{2,}\b'
        q_words = set(re.findall(word_re, q_text))
        a_words = set(re.findall(word_re, a_text))
        if q_words and len(a_words & q_words) / len(q_words) > 0.7:
            return True

        # Finally, direct containment in either direction.
        return True if a_text in q_text or q_text in a_text else False

    def detect_encoding(self, file_path: str) -> str:
        """Return the sentinel 'docx' for Word files, else a guessed text encoding."""
        if file_path.lower().endswith('.docx'):
            return 'docx'
        try:
            # Sample the first 10 KiB; enough for chardet to make a guess.
            with open(file_path, 'rb') as fh:
                guess = chardet.detect(fh.read(10240))
            return guess['encoding'] or 'utf-8'
        except Exception as e:
            raise Exception(f"编码检测失败: {str(e)}")

    def read_file_content(self, file_path: str) -> str:
        """Read a .docx or plain-text file into a single string.

        Raises:
            FileNotFoundError: path does not exist.
            IsADirectoryError: path is not a regular file.
            Exception: wraps any encoding-detection, decode, or parse failure.

        Fix: the old code opened text files with errors='replace', so read()
        could never raise UnicodeDecodeError — the multi-encoding fallback loop
        was dead code and a wrong guess silently produced mojibake. Candidate
        encodings are now tried strictly (and de-duplicated), with latin-1 as a
        last resort since it maps every byte. A bogus encoding name from the
        detector (LookupError) is also skipped instead of aborting.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        if not os.path.isfile(file_path):
            raise IsADirectoryError(f"{file_path}不是一个文件")

        try:
            encoding = self.detect_encoding(file_path)

            if encoding == 'docx':
                # Collect paragraph text plus tab-joined table rows.
                doc = docx.Document(file_path)
                full_text = []
                for para in doc.paragraphs:
                    full_text.append(para.text)
                for table in doc.tables:
                    for row in table.rows:
                        row_text = [cell.text for cell in row.cells]
                        full_text.append('\t'.join(row_text))
                return '\n'.join(full_text)

            tried = []
            for enc in [encoding, 'utf-8', 'gbk']:
                if enc in tried:
                    continue  # detector may have returned utf-8/gbk already
                tried.append(enc)
                try:
                    with open(file_path, 'r', encoding=enc) as file:
                        return file.read()
                except (UnicodeDecodeError, LookupError):
                    continue
            # Last resort: latin-1 decodes any byte sequence, so this succeeds.
            with open(file_path, 'r', encoding='latin-1') as file:
                return file.read()
        except Exception as e:
            raise Exception(f"文件读取失败: {str(e)}")

    def get_ai_response(self, messages: List[Dict], max_retries: int = 3) -> str:
        """Call the chat-completions API, retrying up to *max_retries* times.

        Raises a wrapped Exception after the final failed attempt; earlier
        failures print a notice and back off for two seconds.
        """
        for attempt in range(1, max_retries + 1):
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=messages,
                    temperature=0.7,
                    timeout=30
                )
                return response.choices[0].message.content
            except Exception as e:
                if attempt >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({attempt}/{max_retries})...")
                time.sleep(2)

    def is_cannot_answer(self, user_input: str) -> bool:
        user_input = user_input.strip()
        if not user_input or re.fullmatch(r'[\s,.!?;]+', user_input):
            return True
        return self.cannot_answer_patterns.search(user_input) is not None

    def is_irrelevant(self, user_answer: str) -> Tuple[bool, str]:
        user_answer = user_answer.strip().lower()
        if self.irrelevant_patterns.search(user_answer):
            return True, "回答明确表示与题目无关"
        question_keywords = re.findall(r'\b[\u4e00-\u9fa5]{2,6}\b', self.current_question)
        all_keywords = question_keywords + self.keywords + self.question_dimensions
        has_relevant_keywords = any(keyword in user_answer for keyword in all_keywords)
        if not has_relevant_keywords:
            for topic in self.unrelated_topics:
                if topic in user_answer:
                    return True, f"回答涉及无关主题({topic})，视为乱答"
            if len(user_answer) < 5:
                return True, "回答过短且不涉及题目相关内容，视为乱答"
            return True, "回答与题目及知识库内容无关，视为乱答"
        return False, ""

    def start_exam(self, knowledge_content: str):
        """Run the interactive exam loop against *knowledge_content*.

        Side effects: stores the knowledge base, extracts its keywords,
        (re)builds the TF-IDF index, prints the exam banner, then drives a
        question/answer loop on stdin until max_questions questions are done
        or the user quits. Multi-line answers end on a blank line; EOF on
        stdin is treated as quitting.
        """
        self.knowledge_content = knowledge_content
        self.keywords = self._extract_keywords(knowledge_content)
        self._vectorize_knowledge()
        print("\n" + "=" * 50)
        print("智能考试机器人".center(40))
        print("=" * 50)
        print("\n我将根据数据爬取场景下的文件操作机制知识对你进行考察。")
        print(f"考试共{self.max_questions}题。")
        print("输入'退出'可随时结束考试。")
        print("注意：可以输入多行内容，以空行结束输入\n")
        # Conversation seed: the system prompt fixes the question-generation
        # contract (topic, required keywords, dimensions, output format).
        messages = [
            {"role": "system", "content": f"""
            你是一个公平的智能考试机器人，需要基于数据爬取场景下的文件操作机制相关知识出题和评分：
            核心考察点：文件自动创建与关闭机制在数据爬取中的重要性，尤其是with...as...语句的优势

            工作模式：

            1. 题目生成模式：
            - 必须围绕数据爬取场景下的文件操作机制
            - 题目必须包含至少1个关键词：文件句柄、资源泄漏、with...as...、自动关闭、数据安全、开发效率、爬虫稳定性
            - 必须要求从技术实现、用户体验两个维度回答
            - 示例："请从技术实现和用户体验两个维度分析，在数据爬取中使用with...as...语句管理文件的优势"
            - 只返回题目，不要包含评分信息、评分标准等其他任何信息
            - 题目必须多样化，避免与已出题目相似
            - 不要给出回答建议
            - 尽量从不同角度提问（如：资源管理、开发效率、数据完整性等）
            """},
        ]
        while self.question_count < self.max_questions:
            self._handle_new_question(messages)
            print("\n你: ", end="", flush=True)
            # Collect a multi-line answer: lines accumulate until a blank line
            # follows at least one non-empty line; quit commands exit early.
            user_input_lines = []
            while True:
                try:
                    line = input().strip()
                except EOFError:
                    # Closed stdin is treated as an explicit quit.
                    line = "退出"
                if line.lower() in ['退出', 'q', 'quit']:
                    self._show_final_score()
                    return
                if not line and user_input_lines:
                    user_input = '\n'.join(user_input_lines)
                    break
                if line:
                    user_input_lines.append(line)
                elif not user_input_lines:
                    # Blank line before any content: re-prompt instead of ending input.
                    print("请输入内容或命令：", end="", flush=True)
                    continue
            self._handle_user_answer(user_input, messages)
            print(f"\n本题结束，当前进度: {self.question_count}/{self.max_questions}题")
        self._show_final_score()

    def _handle_new_question(self, messages: List[Dict]):
        """Generate, validate, and present the next exam question.

        Makes up to five LLM attempts; each generated question must contain a
        required keyword, name at least one analysis dimension, avoid
        image/source-listing references, be long enough, and not duplicate
        earlier questions. Falls back to the backup question bank when all
        attempts fail. Mutates *messages*, question bookkeeping state, and
        max_possible_score (+10 per question asked).
        """
        self.question_count += 1
        max_retries = 5
        retry_count = 0
        question = None

        # 1. Diversified question templates, rotated across retries.
        available_templates = [
            "请从[技术实现]和[用户体验]分析[关键词]在数据爬取中的重要性",
            "在[具体爬取场景]中，如何通过[关键词]解决[问题]？请从[维度1]和[维度2]说明",
            "对比[方案A]和[方案B]在[关键词]方面的优劣，从[维度1]和[维度2]分析",
            "为什么[关键词]是[具体爬取场景]中必须关注的要点？请从[技术实现]和[开发效率]分析"
        ]

        while retry_count < max_retries:
            # 2. Pick analysis angles that have not been used recently.
            selected_angles = self._get_unused_angles(2)
            angle_str = "和".join(selected_angles)

            # 3. Widened knowledge-base retrieval (top_k raised from 2 to 3).
            context_query = "文件操作 数据爬取 with语句"
            relevant_passages = self.retrieve_relevant_passages(context_query, top_k=3)
            context = "\n\n".join(relevant_passages)

            # Rotate through the templates so retries vary in structure.
            template = available_templates[retry_count % len(available_templates)]

            # 4. Adapt the retry prompt: on later attempts, steer the model
            # away from recently used keywords and structures.
            retry_hint = ""
            if retry_count > 0:
                used_kw_str = ", ".join(self.used_keywords[-3:]) if self.used_keywords else "无"
                retry_hint = f"\n重试提示：请换一个全新的角度，避免使用关键词[{used_kw_str}]，避免与已出题目的结构相似"

            prompt = f"""请基于以下知识库内容出一道新颖的主观题（避免与已出题目相似）：
            知识库相关段落：
            {context[:1500]}
            要求：
            1. 必须从以下关键词中选择至少1个（优先未使用的）：文件句柄、资源泄漏、with...as...、自动关闭、数据安全、开发效率、爬虫稳定性
            2. 题目形式请参考模板：{template}
            3. 确保题目在知识库中有明确答案依据
            4. 必须包含分析角度：{angle_str}
            5. 仅输出题目内容，不要包含其他说明
            {retry_hint}"""

            messages.append({"role": "user", "content": prompt})
            question = self.get_ai_response(messages)

            # Validate the generated question against the exam contract.
            validation_failed = False
            required_keywords = {'文件句柄', '资源泄漏', 'with...as...', '自动关闭', '数据安全', '开发效率',
                                 '爬虫稳定性'}
            if not any(keyword in question for keyword in required_keywords):
                print("⚠️ 题目未包含必要关键词，重新生成...")
                validation_failed = True
            # Change from earlier revision: only ONE analysis dimension is
            # required now, not two.
            elif len(re.findall(r'技术实现|用户体验|系统稳定性|数据可靠性', question)) < 1:
                print("⚠️ 题目未明确要求至少一个分析维度，重新生成...")
                validation_failed = True
            elif self.image_patterns.search(question):
                print("⚠️ 题目包含图片/源程序相关表述，重新生成...")
                validation_failed = True
            elif not question or len(question) < 10:
                print("⚠️ 题目过短，重新生成...")
                validation_failed = True
            elif self._is_question_too_similar(question):
                print("⚠️ 题目与历史题目过于相似，重新生成...")
                validation_failed = True

            if validation_failed:
                retry_count += 1
                # Drop the failed prompt so rejected attempts do not bloat the
                # conversation sent to the model.
                messages.pop()
                continue

            # Record which keyword was used.
            # NOTE(review): only the FIRST matching keyword is recorded (break)
            # even if the question contains several — confirm this is intended.
            for kw in required_keywords:
                if kw in question:
                    self.used_keywords.append(kw)
                    break

            # Record which angles were requested for this question.
            for angle in selected_angles:
                self.used_angles.add(angle)

            break

        # 5. Fall back to the backup question bank when generation kept failing.
        if retry_count >= max_retries or not question:
            print("⚠️ 多次尝试后仍无法获取有效题目，使用保底题库")
            available_backup = [i for i, q in enumerate(self.backup_questions) if i not in self.used_backup_questions]

            if available_backup:
                selected_idx = random.choice(available_backup)
                question = self.backup_questions[selected_idx]
                self.used_backup_questions.add(selected_idx)
                print(f"使用保底题目: {question}")
            else:
                # Bank exhausted: undo the counter bump and skip this round.
                print("⚠️ 保底题目已用完，跳过本题")
                self.question_count -= 1
                return

        # Normalize the question: strip any trailing "|..." annotations the
        # model may append, and any leading numbering like "1." / "1.2 ".
        question = question.split("|")[0].strip()
        question = re.sub(r'^\d+[.\d]*\s*', '', question)
        self.current_question = question
        self.question_dimensions = self._extract_dimensions(question)
        self.generated_questions.append(question)
        messages.append({"role": "assistant", "content": question})
        print(f"\n===== 第{self.question_count}/{self.max_questions}题 =====")
        print(f"考试机器人: {question}")
        self.max_possible_score += 10

    def _handle_user_answer(self, user_answer: str, messages: List[Dict]):
        """多维度结构化评分系统 - 关键词评分精细化"""
        if self.is_cannot_answer(user_answer):
            print(f"\n用户回答: {user_answer}")
            print("\n检测到无法回答的表述，按规则得0分")
            self.total_score += 0
            self.question_scores.append(0)
            print(f"\n评分: 0.0/10")
            print(f"反馈: 本题需要按照题目要求的多个维度回答")
            print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")
            messages.append({"role": "user", "content": user_answer})
            messages.append({"role": "assistant", "content": "0|检测到无法回答的表述|否"})
            return

        is_irrelevant, reason = self.is_irrelevant(user_answer)
        if is_irrelevant:
            print(f"\n用户回答: {user_answer}")
            print(f"\n检测到无关回答: {reason}，按规则得0分")
            self.total_score += 0
            self.question_scores.append(0)
            print(f"\n评分: 0.0/10")
            print(f"反馈: 回答与题目或知识库内容无关，请围绕题目要求作答")
            print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")
            messages.append({"role": "user", "content": user_answer})
            messages.append({"role": "assistant", "content": f"0|{reason}|否"})
            return

        if self._is_duplicating_question(self.current_question, user_answer):
            print(f"\n用户回答: {user_answer}")
            print("\n检测到回答只是重复或修改题目内容，按规则得0分")
            self.total_score += 0
            self.question_scores.append(0)
            print(f"\n评分: 0.0/10")
            print(f"反馈: 回答只是重复或修改题目内容，请提供具体分析和解释")
            print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")
            messages.append({"role": "user", "content": user_answer})
            messages.append({"role": "assistant", "content": "0|回答只是重复或修改题目内容，未提供有效信息|否"})
            return

        # ===== 自动结构化评分 =====
        score_struct = {
            "dimension": 0,  # 维度覆盖
            "core_keywords": 0,  # 核心关键词覆盖
            "ext_keywords": 0,  # 扩展关键词覆盖
            "length": 0,  # 字数/详细程度
            "logic": 0,  # 条理性/逻辑性
            "innovation": 0,  # 创新性/举例/拓展
        }
        max_score_struct = {
            "dimension": 2.5,
            "core_keywords": 2.0,  # 核心关键词权重更高
            "ext_keywords": 1.0,  # 扩展关键词权重较低
            "length": 1.5,
            "logic": 2.0,
            "innovation": 1.0,
        }

        # 维度覆盖打分 - 改为技术实现和用户体验两个维度
        tech_implementation_indicators = {'技术', '实现', '机制', '原理', '代码', '语法', '资源', '句柄', '泄漏'}
        user_experience_indicators = {'用户', '体验', '开发', '效率', '简洁', '方便', '安全', '稳定'}

        has_tech = any(indicator in user_answer for indicator in tech_implementation_indicators)
        has_ux = any(indicator in user_answer for indicator in user_experience_indicators)
        covered_dimensions = sum([has_tech, has_ux])
        score_struct["dimension"] = max_score_struct["dimension"] * (covered_dimensions / 2)  # 两个维度都覆盖得满分

        # 关键词覆盖打分 - 区分核心关键词和扩展关键词
        # 核心关键词
        core_kw_hits = sum(1 for k in self.core_keywords if k in user_answer)
        score_struct["core_keywords"] = max_score_struct["core_keywords"] * min(1, core_kw_hits / 3)

        # 扩展关键词（从知识库中提取的非核心关键词）
        if not self.extended_keywords:
            # 首次使用时提取知识库中的扩展关键词
            all_keywords = self._extract_keywords(self.knowledge_content)
            self.extended_keywords = set(all_keywords) - self.core_keywords

        ext_kw_hits = sum(1 for k in self.extended_keywords if k in user_answer)
        score_struct["ext_keywords"] = max_score_struct["ext_keywords"] * min(1, ext_kw_hits / 5)

        # 长度
        length = len(user_answer)
        if length > 150:
            score_struct["length"] = max_score_struct["length"]
        elif length > 80:
            score_struct["length"] = max_score_struct["length"] * 0.7
        elif length > 40:
            score_struct["length"] = max_score_struct["length"] * 0.4

        # 条理性
        lines = [l for l in user_answer.split('\n') if l.strip()]
        bullet = sum(1 for l in lines if l.strip().startswith(('-', '①', '1.', '一、')))
        score_struct["logic"] = max_score_struct["logic"] * (0.7 if bullet > 0 else 0.4 if len(lines) > 1 else 0.2)

        # 创新性/扩展 - 鼓励通俗类比
        if any(x in user_answer for x in ['比如', '例如', '案例', '举例', '此外', '进一步', '比作', '像']):
            score_struct["innovation"] = max_score_struct["innovation"]

        auto_score = sum(score_struct.values())
        auto_feedback = f"结构化打分: 维度覆盖{score_struct['dimension']:.1f}/{max_score_struct['dimension']}, " \
                        f"核心关键词{score_struct['core_keywords']:.1f}/{max_score_struct['core_keywords']}, " \
                        f"扩展关键词{score_struct['ext_keywords']:.1f}/{max_score_struct['ext_keywords']}, " \
                        f"长度{score_struct['length']:.1f}/{max_score_struct['length']}, " \
                        f"条理性{score_struct['logic']:.1f}/{max_score_struct['logic']}, " \
                        f"创新性{score_struct['innovation']:.1f}/{max_score_struct['innovation']}."

        messages.append({"role": "user", "content": user_answer})
        print(f"\n用户回答: {user_answer}")

        # ===== RAG检索+AI补充评价 =====
        query = f"{self.current_question}\n{user_answer}"
        relevant_passages = self.retrieve_relevant_passages(query, top_k=3)
        context = "\n\n".join(relevant_passages)
        evaluation = self.get_ai_response(messages + [
            {"role": "system", "content": f"""
            你是评分专家。请根据数据爬取场景下文件操作机制的知识，对用户作答给出补充评价和建议（不直接给分数）。
            重点关注：
            1. 是否清晰分析了文件自动创建/关闭对爬虫稳定性的影响（资源管理、句柄释放等）
            2. 是否说明白了对开发效率的提升（聚焦业务逻辑、语法简洁等）
            3. 逻辑是否清晰，是否有通俗类比（如将自动关闭比作"自动关灯"等生活化比喻）
            不要苛求底层技术深度，重点看与数据爬取场景的贴合度和实用分析。
            用户答案：{user_answer}
            """},
        ])
        # AI评分补充调整
        ai_score_adjustment = 0
        if '举例' in evaluation or '创新' in evaluation or '类比' in evaluation:
            ai_score_adjustment += 0.3
        if '不够详细' in evaluation or '比较简单' in evaluation:
            ai_score_adjustment -= 0.3
        if '逻辑混乱' in evaluation or '内容偏离' in evaluation:
            ai_score_adjustment -= 0.5
        final_score = max(0, min(10, auto_score * 0.8 + (auto_score + ai_score_adjustment) * 0.2))

        self.total_score += final_score
        self.question_scores.append(final_score)

        print(f"\n{auto_feedback}")
        print(f"AI补充评价: {evaluation.strip()[:120]}...")
        print(f"\n评分: {final_score:.1f}/10")
        print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")

        feedback = f"{auto_feedback}\nAI补充: {evaluation.strip()}"
        messages.append({"role": "assistant", "content": f"{final_score:.1f}|{feedback}|结构化打分+AI补充评价"})

    def _ask_follow_up(self, messages: List[Dict]):
        """Placeholder for a follow-up questioning flow; intentionally unimplemented."""
        pass

    def _show_final_score(self):
        """Print the final report: per-question scores, total, grade band, and mastery summary."""
        print("\n" + "=" * 50)
        if self.question_count > 0:
            if self.max_possible_score > 0:
                percentage = (self.total_score / self.max_possible_score) * 100
            else:
                percentage = 0

            print(f"\n考试结束！共完成 {self.question_count} 道题")
            print("\n各题得分明细:")
            for idx, score in enumerate(self.question_scores, 1):
                print(f"第{idx}题: {score:.1f}/10")

            print(f"\n最终得分: {self.total_score:.1f}/{self.max_possible_score} ({percentage:.1f}%)")

            # Grade bands from best to worst; the first matching band wins.
            bands = [
                (90, "\n成绩卓越！全面掌握了数据爬取中文件操作机制的知识。"),
                (75, "\n成绩优秀！对数据爬取中文件操作机制有深入理解。"),
                (60, "\n成绩良好！掌握了文件操作主要内容，细节有待加强。"),
                (40, "\n成绩及格！需要加强文件操作机制的学习。"),
            ]
            for threshold, message in bands:
                if percentage >= threshold:
                    print(message)
                    break
            else:
                print("\n未达标！建议系统复习数据爬取中的文件操作知识。")

            print("\n知识点掌握情况:")
            mastered = sum(1 for score in self.question_scores if score >= 6)
            print(f"- 掌握较好的知识点: {mastered}/{self.question_count}")
            print(f"- 需要加强的知识点: {self.question_count - mastered}/{self.question_count}")
        else:
            print("本次未完成任何题目。")
        print("=" * 50 + "\n")


if __name__ == "__main__":
    CONFIG = {
        "api_key": "sk-ce186a5b911341ba983baf0bf1ee2904",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "file_path": r"D:\桌面\竞赛\挑战杯+人工智能\第3章 数据获取.docx",  # 可替换为实际文件路径
        "max_questions": 5
    }
    try:
        exam_bot = SmartExamBot(
            api_key=CONFIG["api_key"],
            base_url=CONFIG["base_url"],
            max_questions=CONFIG["max_questions"]
        )
        knowledge_content = exam_bot.read_file_content(CONFIG["file_path"])
        print(f"成功加载知识内容，长度: {len(knowledge_content)} 字符")
        print("提取的关键词:", ', '.join(exam_bot._extract_keywords(knowledge_content)))
        print("知识内容预览:", knowledge_content[:200] + "...")
        exam_bot.start_exam(knowledge_content)
    except Exception as e:
        print(f"程序运行出错: {str(e)}")
    finally:
        input("按回车键退出...")