import os
import chardet
import time
import docx
import re
import numpy as np
from typing import List, Dict, Tuple, Any
from openai import OpenAI
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class SmartExamBot:
    def __init__(self, api_key: str, base_url: str, max_questions: int = 5):
        """Initialize API client, exam state, and question-diversity tracking.

        Args:
            api_key: key for the OpenAI-compatible endpoint.
            base_url: base URL of the OpenAI-compatible endpoint.
            max_questions: number of questions in one exam session.
        """
        # --- API client / exam configuration ---
        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.max_questions = max_questions

        # --- scoring state (read by _handle_user_answer / _show_final_score) ---
        self.question_count = 0
        self.total_score = 0.0
        self.max_possible_score = 0
        self.question_scores: List[float] = []

        # --- question-generation state ---
        self.current_question = ""
        self.question_dimensions: List[str] = []
        self.generated_questions: List[str] = []
        self.keywords: List[str] = []

        # --- knowledge base / retrieval state ---
        self.knowledge_content = ""
        self.knowledge_passages: List[str] = []
        self.knowledge_vectors = None
        self.vectorizer = TfidfVectorizer()

        # --- validation patterns and topics ---
        # NOTE(review): the original __init__ body was elided ("原有初始化代码保持不变");
        # these patterns are reconstructed from their call sites — confirm against
        # the original initialization code.
        self.image_patterns = re.compile(r'图片|图示|如图|截图|源程序|源代码')
        self.cannot_answer_patterns = re.compile(r'不会|不知道|不清楚|不懂|无法回答|没学过|不了解')
        self.irrelevant_patterns = re.compile(r'与题目无关|不相关|随便答|乱答')
        self.unrelated_topics = ['天气', '游戏', '电影', '音乐', '体育', '美食']

        self.question_similarity_threshold = 0.6  # moderately lowered similarity threshold
        self.used_keywords: List[str] = []  # keywords already consumed by questions
        self.used_angles = set()  # question angles already used
        # BUG FIX: the original wrote `self保底题目库 = [...]` (missing dot after
        # `self`), which silently created a throwaway local variable instead of
        # an instance attribute. Stored as a real attribute now.
        self.fallback_question_bank = [  # fallback question bank
            "在大规模分布式爬虫场景中，如何通过with...as...语句解决文件句柄耗尽问题？请从资源管理和系统稳定性两个维度说明",
            "对比try-finally语句与with...as...语句在数据安全方面的优劣，从异常处理和资源释放两个维度分析",
            "在高频率动态页面爬取场景中，如何通过自动关闭机制保障数据完整性？请从技术实现和数据可靠性两个维度说明",
            "对比手动关闭文件与自动关闭机制在开发效率方面的差异，从代码量和出错率两个维度分析",
            "在需要断点续爬的场景中，如何通过with...as...语句确保临时文件的正确处理？请从程序健壮性和用户体验两个维度说明",
            "对比单线程爬虫与多线程爬虫中文件句柄管理的差异，从资源竞争和稳定性两个维度分析",
            "在爬取过程突发断电的场景中，自动关闭机制如何保护未完成数据？请从数据恢复和系统保护两个维度说明",
            "对比直接文件操作与with语句在爬虫长期运行中的表现，从内存占用和句柄泄漏两个维度分析",
            "在需要爬取海量小文件的场景中，如何通过with...as...提升处理效率？请从IO优化和代码简洁性两个维度说明",
            "对比不同编程语言中文件自动管理机制的实现，从语法设计和用户体验两个维度分析"
        ]
        self.used_fallback_questions = set()  # fallback questions already served


    def _chunk_knowledge(self, content: str, chunk_size: int = 500) -> List[str]:
        """Split *content* into chunks of at most chunk_size whitespace tokens."""
        tokens = content.split()
        return [
            " ".join(tokens[start:start + chunk_size])
            for start in range(0, len(tokens), chunk_size)
        ]

    def _vectorize_knowledge(self):
        """Fit the TF-IDF vectorizer over the knowledge passages.

        Lazily chunks the raw knowledge text on first use, then stores the
        passage matrix in self.knowledge_vectors for retrieval.
        """
        if not self.knowledge_passages:
            # No passages yet: derive them from the raw knowledge text.
            self.knowledge_passages = self._chunk_knowledge(self.knowledge_content)
        passages = self.knowledge_passages
        self.vectorizer.fit(passages)
        self.knowledge_vectors = self.vectorizer.transform(passages)

    def retrieve_relevant_passages(self, query: str, top_k: int = 3) -> List[str]:
        """Return the top_k knowledge passages most similar to *query*.

        Similarity is TF-IDF cosine similarity; the index is built lazily on
        first call via _vectorize_knowledge().
        """
        if self.knowledge_vectors is None:
            self._vectorize_knowledge()
        query_vec = self.vectorizer.transform([query])
        scores = cosine_similarity(query_vec, self.knowledge_vectors)[0]
        ranked = np.argsort(scores)[::-1][:top_k]  # descending by similarity
        return [self.knowledge_passages[idx] for idx in ranked]

    def _is_question_too_similar(self, new_question: str) -> bool:
        """Check whether *new_question* is too close to any earlier question.

        A question counts as similar when, against any historical question,
        either its TF-IDF cosine similarity exceeds
        self.question_similarity_threshold or its keyword-set Jaccard overlap
        exceeds 0.7.
        """
        if not self.generated_questions:
            return False

        # PERF FIX: the new question's keyword set and TF-IDF vector are
        # loop-invariant; the original recomputed the transform on every
        # iteration of the history loop.
        new_keywords = set(self._extract_keywords(new_question))
        new_vec = self.vectorizer.transform([new_question])

        for old_question in self.generated_questions:
            # Keyword overlap (Jaccard).
            old_keywords = set(self._extract_keywords(old_question))
            common = len(new_keywords & old_keywords)
            total = len(new_keywords | old_keywords)
            keyword_overlap = common / total if total > 0 else 0

            # Cosine similarity in TF-IDF space.
            old_vec = self.vectorizer.transform([old_question])
            cosine_sim = cosine_similarity(new_vec, old_vec)[0][0]

            # Either signal above its threshold marks the question as similar.
            if cosine_sim > self.question_similarity_threshold or keyword_overlap > 0.7:
                return True
        return False
    def _extract_keywords(self, text: str) -> List[str]:
        """Extract up to 10 repeated 2-6 character Chinese terms from *text*.

        Terms appearing only once or in the stop-word list are dropped; when
        nothing qualifies, a fixed set of domain core terms is returned.
        """
        tokens = re.findall(r'\b[\u4e00-\u9fa5]{2,6}\b', text)
        frequencies = Counter(tokens)
        stop_words = {'可以', '一个', '这个', '我们', '进行', '需要', '应该', '问题', '方法'}
        candidates = [
            token for token, freq in frequencies.most_common(20)
            if token not in stop_words and freq > 1
        ]
        # Fallback keeps downstream matching meaningful on sparse input.
        return candidates[:10] if candidates else ["文件句柄", "资源泄漏", "数据爬取"]

    def _handle_new_question(self, messages: List[Dict]):
        """Diversity-enhanced question generator (templates + angle tracking).

        NOTE(review): this definition is shadowed by a later method of the
        same name further down the class, so Python discards this one at
        class-creation time. Fixed here so it can be merged or restored
        deliberately.
        """
        self.question_count += 1
        max_retries = 5  # keep the original retry budget
        retry_count = 0
        question = None

        # BUG FIX: required_keywords was referenced inside the prompt f-string
        # before it was assigned (NameError on every call). Define it once,
        # ahead of the retry loop.
        required_keywords = {'文件句柄', '资源泄漏', 'with...as...', '自动关闭', '数据安全', '开发效率',
                             '爬虫稳定性'}

        while retry_count < max_retries:
            # Retrieval widened from top-2 to top-3 passages.
            context_query = "文件操作 数据爬取 with语句"
            relevant_passages = self.retrieve_relevant_passages(context_query, top_k=3)
            context = "\n\n".join(relevant_passages)

            # Rotate through templates so retries produce different phrasings.
            available_templates = [
                "请从[技术实现]和[用户体验]分析[关键词]在数据爬取中的重要性",
                "在[具体爬取场景]中，如何通过[关键词]解决[问题]？请从[维度1]和[维度2]说明",
                "对比[方案A]和[方案B]在[关键词]方面的优劣，从[维度1]和[维度2]分析",
                "为什么[关键词]是[具体爬取场景]中必须关注的要点？请从[技术实现]和[开发效率]分析"
            ]
            template = available_templates[retry_count % len(available_templates)]

            # On retries, steer the model away from recently used keywords.
            retry_hint = ""
            if retry_count > 0:
                used_kw_str = ", ".join(self.used_keywords[-3:]) if self.used_keywords else "无"
                retry_hint = f"\n重试提示：请换一个全新的角度，避免使用关键词[{used_kw_str}]，避免与已出题目的结构相似"

            prompt = f"""请基于以下知识库内容出一道新颖的主观题（避免与已出题目相似）：
            知识库相关段落：
            {context[:1500]}
            要求：
            1. 必须从以下关键词中选择至少1个（优先未使用的）：{[kw for kw in required_keywords if kw not in self.used_keywords]}
            2. 题目形式请参考模板：{template}
            3. 确保题目在知识库中有明确答案依据
            4. 必须包含新的分析角度，不重复已使用角度：{self.used_angles}
            5. 从不同维度提问（如：资源管理、开发效率、数据完整性等）
            6. 仅输出题目内容，不要包含其他说明
            {retry_hint}"""

            messages.append({"role": "user", "content": prompt})
            question = self.get_ai_response(messages)

            # Validate the generated question.
            validation_failed = False
            if not any(keyword in question for keyword in required_keywords):
                print("⚠️ 题目未包含必要关键词，重新生成...")
                validation_failed = True
            elif len(re.findall(r'技术实现|用户体验|系统稳定性|数据可靠性', question)) < 2:
                print("⚠️ 题目未明确要求两个分析维度，重新生成...")
                validation_failed = True
            elif self.image_patterns.search(question):
                print("⚠️ 题目包含图片/源程序相关表述，重新生成...")
                validation_failed = True
            elif not question or len(question) < 10:
                print("⚠️ 题目过短，重新生成...")
                validation_failed = True
            elif self._is_question_too_similar(question):
                print("⚠️ 题目与历史题目过于相似，重新生成...")
                validation_failed = True

            if validation_failed:
                retry_count += 1
                messages.pop()  # drop the failed prompt from the history
                continue

            # Record the keyword and angle this question consumed.
            for kw in required_keywords:
                if kw in question:
                    self.used_keywords.append(kw)
                    break
            self.used_angles.add(question.split('分析')[0].strip())
            break

        # BUG FIX: the original stopped after the loop and never published the
        # question; finalize exactly like the sibling generator so callers see
        # current_question/dimensions/history updated.
        if retry_count >= max_retries or not question:
            print("⚠️ 多次尝试后仍无法获取有效题目，跳过本题")
            self.question_count -= 1  # roll back so the slot can be retried
            return
        question = question.split("|")[0].strip()
        question = re.sub(r'^\d+[.\d]*\s*', '', question)
        self.current_question = question
        self.question_dimensions = self._extract_dimensions(question)
        self.generated_questions.append(question)
        messages.append({"role": "assistant", "content": question})
        print(f"\n===== 第{self.question_count}/{self.max_questions}题 =====")
        print(f"考试机器人: {question}")
        self.max_possible_score += 10  # each question is worth 10 points
    

    def _extract_dimensions(self, question: str) -> List[str]:
        """Pull the analysis dimensions out of a '请从…分析' style question."""
        found = re.search(r'请从(.*?)分析', question)
        if not found:
            return []
        raw = found.group(1)
        # Dimensions are separated by 、 / 和 / 与.
        return [part.strip() for part in re.split(r'、|和|与', raw) if part.strip()]

    def _is_duplicating_question(self, question: str, answer: str) -> bool:
        """Heuristically detect answers that merely restate the question.

        After stripping punctuation and question-phrasing words, checks in
        order: answer much shorter than the question, high position-wise
        character overlap, high word-set overlap, and direct substring
        containment. Any hit means "duplicating".
        """
        if not question or not answer:
            return False

        def _strip(text: str) -> str:
            # Drop punctuation, then common question-phrasing words.
            text = re.sub(r'[^\w\s]', '', text)
            text = re.sub(r'请从|分析|简述|说明|论述|谈谈|如何|什么|为什么|是什么|有哪些', '', text,
                          flags=re.IGNORECASE)
            return text.strip()

        q = _strip(question)
        a = _strip(answer)

        # Suspiciously short answers count as restatements.
        if len(a) < len(q) * 0.5:
            return True

        # Position-wise character overlap over the shorter string.
        overlap_len = min(len(q), len(a))
        same_positions = sum(1 for qc, ac in zip(q, a) if qc == ac)
        if overlap_len > 0 and same_positions / overlap_len > 0.3:
            return True

        # Word-set overlap relative to the question's vocabulary.
        q_words = set(re.findall(r'\b[\u4e00-\u9fa5a-zA-Z]{2,}\b', q))
        a_words = set(re.findall(r'\b[\u4e00-\u9fa5a-zA-Z]{2,}\b', a))
        if q_words and len(q_words & a_words) / len(q_words) > 0.7:
            return True

        # Direct containment either way.
        return a in q or q in a

    def detect_encoding(self, file_path: str) -> str:
        """Return 'docx' for Word files, else a best-guess text encoding.

        Samples the first 10 KiB with chardet; falls back to utf-8 when the
        detector returns nothing. Wraps any I/O failure in a generic error.
        """
        if file_path.lower().endswith('.docx'):
            return 'docx'
        try:
            with open(file_path, 'rb') as handle:
                guess = chardet.detect(handle.read(10240))
            return guess['encoding'] or 'utf-8'
        except Exception as e:
            raise Exception(f"编码检测失败: {str(e)}")

    def read_file_content(self, file_path: str) -> str:
        """Read text content from a plain-text or .docx file.

        Args:
            file_path: path of the file to read.

        Returns:
            The file's text; for .docx, all paragraphs plus tab-joined table
            rows, newline-separated.

        Raises:
            FileNotFoundError: if the path does not exist.
            IsADirectoryError: if the path is not a regular file.
            Exception: wrapping any read/decode failure.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        if not os.path.isfile(file_path):
            raise IsADirectoryError(f"{file_path}不是一个文件")

        try:
            encoding = self.detect_encoding(file_path)

            if encoding == 'docx':
                doc = docx.Document(file_path)
                full_text = [para.text for para in doc.paragraphs]
                for table in doc.tables:
                    for row in table.rows:
                        full_text.append('\t'.join(cell.text for cell in row.cells))
                return '\n'.join(full_text)

            # BUG FIX: the original passed errors='replace' on every attempt,
            # so UnicodeDecodeError could never fire and the fallback encodings
            # (and the "无法识别文件编码" branch) were dead code. Decode strictly
            # first; LookupError covers bogus encoding names from chardet.
            for enc in [encoding, 'utf-8', 'gbk']:
                try:
                    with open(file_path, 'r', encoding=enc) as file:
                        return file.read()
                except (UnicodeDecodeError, LookupError):
                    continue
            # latin-1 maps every byte, so this lossy last resort cannot fail.
            with open(file_path, 'r', encoding='latin-1') as file:
                return file.read()
        except Exception as e:
            raise Exception(f"文件读取失败: {str(e)}")

    def get_ai_response(self, messages: List[Dict], max_retries: int = 3) -> str:
        """Call the chat-completions API, retrying on failure.

        Args:
            messages: chat history to send.
            max_retries: total attempts before giving up.

        Returns:
            The model's reply text.

        Raises:
            Exception: wrapping the last API error once retries are exhausted.
        """
        for attempt in range(1, max_retries + 1):
            try:
                completion = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=messages,
                    temperature=0.7,
                    timeout=30
                )
                return completion.choices[0].message.content
            except Exception as e:
                if attempt == max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({attempt}/{max_retries})...")
                time.sleep(2)  # brief back-off between attempts

    def is_cannot_answer(self, user_input: str) -> bool:
        user_input = user_input.strip()
        if not user_input or re.fullmatch(r'[\s,.!?;]+', user_input):
            return True
        return self.cannot_answer_patterns.search(user_input) is not None

    def is_irrelevant(self, user_answer: str) -> Tuple[bool, str]:
        user_answer = user_answer.strip().lower()
        if self.irrelevant_patterns.search(user_answer):
            return True, "回答明确表示与题目无关"
        question_keywords = re.findall(r'\b[\u4e00-\u9fa5]{2,6}\b', self.current_question)
        all_keywords = question_keywords + self.keywords + self.question_dimensions
        has_relevant_keywords = any(keyword in user_answer for keyword in all_keywords)
        if not has_relevant_keywords:
            for topic in self.unrelated_topics:
                if topic in user_answer:
                    return True, f"回答涉及无关主题({topic})，视为乱答"
            if len(user_answer) < 5:
                return True, "回答过短且不涉及题目相关内容，视为乱答"
            return True, "回答与题目及知识库内容无关，视为乱答"
        return False, ""

    def start_exam(self, knowledge_content: str) -> None:
        """Run the interactive exam loop over *knowledge_content*.

        Builds the TF-IDF index, prints the banner, then for each round:
        generates a question, reads a multi-line answer (terminated by a
        blank line), and scores it. Typing 退出/q/quit — or EOF on stdin —
        ends the exam early and prints the final score.
        """
        self.knowledge_content = knowledge_content
        self.keywords = self._extract_keywords(knowledge_content)
        self._vectorize_knowledge()  # build the retrieval index up front
        print("\n" + "=" * 50)
        print("智能考试机器人".center(40))
        print("=" * 50)
        print("\n我将根据数据爬取场景下的文件操作机制知识对你进行考察。")
        print(f"考试共{self.max_questions}题。")
        print("输入'退出'可随时结束考试。")
        print("注意：可以输入多行内容，以空行结束输入\n")
        # System prompt fixes the question domain, required keywords,
        # required dimensions, and output format for the model.
        messages = [
            {"role": "system", "content": f"""
            你是一个公平的智能考试机器人，需要基于数据爬取场景下的文件操作机制相关知识出题和评分：
            核心考察点：文件自动创建与关闭机制在数据爬取中的重要性，尤其是with...as...语句的优势

            工作模式：

            1. 题目生成模式：
            - 必须围绕数据爬取场景下的文件操作机制
            - 题目必须包含至少1个关键词：文件句柄、资源泄漏、with...as...、自动关闭、数据安全、开发效率、爬虫稳定性
            - 必须要求从技术实现、用户体验两个维度回答
            - 示例："请从技术实现和用户体验两个维度分析，在数据爬取中使用with...as...语句管理文件的优势"
            - 只返回题目，不要包含评分信息、评分标准等其他任何信息
            - 题目必须多样化，避免与已出题目相似
            - 不要给出回答建议
            - 尽量从不同角度提问（如：资源管理、开发效率、数据完整性等）
            """},
        ]
        while self.question_count < self.max_questions:
            self._handle_new_question(messages)
            print("\n你: ", end="", flush=True)
            # Multi-line answer collection: a blank line AFTER some content
            # ends input; a blank line before any content re-prompts.
            user_input_lines = []
            while True:
                try:
                    line = input().strip()
                except EOFError:
                    # Treat exhausted stdin (e.g. piped input) as an exit request.
                    line = "退出"
                if line.lower() in ['退出', 'q', 'quit']:
                    self._show_final_score()
                    return
                if not line and user_input_lines:
                    user_input = '\n'.join(user_input_lines)
                    break
                if line:
                    user_input_lines.append(line)
                elif not user_input_lines:
                    print("请输入内容或命令：", end="", flush=True)
                    continue
            self._handle_user_answer(user_input, messages)
            print(f"\n本题结束，当前进度: {self.question_count}/{self.max_questions}题")
        self._show_final_score()

    def _handle_new_question(self, messages: List[Dict]):
        """Generate, validate and publish the next exam question.

        Retries up to max_retries times when the model's output fails
        validation; on total failure the question counter is rolled back and
        the slot is skipped. On success the question is recorded and printed.
        """
        self.question_count += 1
        max_retries = 5
        retry_count = 0
        question = None

        # PERF: the retrieval query and keyword set never change between
        # retries, so hoist them out of the loop (the original re-retrieved
        # the same passages on every attempt).
        context_query = "文件操作 数据爬取 with语句"
        relevant_passages = self.retrieve_relevant_passages(context_query, top_k=2)
        context = "\n\n".join(relevant_passages)
        required_keywords = {'文件句柄', '资源泄漏', 'with...as...', '自动关闭', '数据安全', '开发效率', '爬虫稳定性'}

        while retry_count < max_retries:
            prompt = f"""请基于以下知识库内容出一道新颖的主观题（避免与已出题目相似）：
            知识库相关段落：
            {context[:1500]}
            要求：
            1. 必须从以下关键词中选择至少1个：文件句柄、资源泄漏、with...as...、自动关闭、数据安全、开发效率、爬虫稳定性
            2. 题目形式应为：
               "请从[技术实现]和[用户体验]分析[关键词]..."
            3. 确保题目在知识库中有明确答案依据
            4. 题目必须与前{len(self.generated_questions)}道已出题目有明显区别
            5. 从不同角度提问（如：资源管理、开发效率、数据完整性等）
            6. 仅输出题目内容，不要输出"评分标准"等其他所有内容、过滤额外说明内容
            7. 绝对不要包含任何图片、图示、源程序等描述"""
            messages.append({"role": "user", "content": prompt})
            question = self.get_ai_response(messages)

            failure_reason = self._validate_generated_question(question, required_keywords)
            if failure_reason:
                print(failure_reason)
                retry_count += 1
                messages.pop()  # drop the failed prompt from the history
                continue
            break

        if retry_count >= max_retries or not question:
            print("⚠️ 多次尝试后仍无法获取有效题目，跳过本题")
            self.question_count -= 1  # roll back so the slot can be retried
            return
        # Strip trailing "|…" metadata and any leading numbering the model added.
        question = question.split("|")[0].strip()
        question = re.sub(r'^\d+[.\d]*\s*', '', question)
        self.current_question = question
        self.question_dimensions = self._extract_dimensions(question)
        self.generated_questions.append(question)
        messages.append({"role": "assistant", "content": question})
        print(f"\n===== 第{self.question_count}/{self.max_questions}题 =====")
        print(f"考试机器人: {question}")
        self.max_possible_score += 10  # each question is worth 10 points

    def _validate_generated_question(self, question: str, required_keywords) -> str:
        """Return a warning message when *question* is invalid, else ''."""
        if not any(keyword in question for keyword in required_keywords):
            return "⚠️ 题目未包含必要关键词，重新生成..."
        if len(re.findall(r'技术实现|用户体验', question)) < 2:
            return "⚠️ 题目未明确要求技术实现和用户体验两个维度，重新生成..."
        if self.image_patterns.search(question):
            return "⚠️ 题目包含图片/源程序相关表述，重新生成..."
        if not question or len(question) < 10:
            return "⚠️ 题目过短，重新生成..."
        if self._is_question_too_similar(question):
            return "⚠️ 题目与历史题目过于相似，重新生成..."
        return ""

    # ====== Structured auto-scoring + AI-supplemented subjective grading ======
    def _handle_user_answer(self, user_answer: str, messages: List[Dict]):
        """Score one answer: zero-score gates, structured scoring, AI review.

        Gates (each scores 0 and returns early): "can't answer" phrasing,
        irrelevant content, mere restatement of the question. Otherwise a
        rule-based structured score is computed and lightly adjusted by an
        AI reviewer's remarks.
        """
        if self.is_cannot_answer(user_answer):
            self._record_zero_score(
                user_answer, messages,
                "\n检测到无法回答的表述，按规则得0分",
                "反馈: 本题需要按照题目要求的多个维度回答",
                "0|检测到无法回答的表述|否")
            return

        is_irrelevant, reason = self.is_irrelevant(user_answer)
        if is_irrelevant:
            self._record_zero_score(
                user_answer, messages,
                f"\n检测到无关回答: {reason}，按规则得0分",
                "反馈: 回答与题目或知识库内容无关，请围绕题目要求作答",
                f"0|{reason}|否")
            return

        if self._is_duplicating_question(self.current_question, user_answer):
            self._record_zero_score(
                user_answer, messages,
                "\n检测到回答只是重复或修改题目内容，按规则得0分",
                "反馈: 回答只是重复或修改题目内容，请提供具体分析和解释",
                "0|回答只是重复或修改题目内容，未提供有效信息|否")
            return

        # ===== Structured auto-scoring =====
        auto_score, auto_feedback = self._auto_structured_score(user_answer)

        messages.append({"role": "user", "content": user_answer})
        print(f"\n用户回答: {user_answer}")

        # ===== RAG retrieval + AI supplementary review =====
        query = f"{self.current_question}\n{user_answer}"
        relevant_passages = self.retrieve_relevant_passages(query, top_k=3)
        context = "\n\n".join(relevant_passages)
        # BUG FIX: `context` was retrieved but never included in the evaluator
        # prompt, so the "RAG" review ignored the knowledge base entirely.
        evaluation = self.get_ai_response(messages + [
            {"role": "system", "content": f"""
            你是评分专家。请根据数据爬取场景下文件操作机制的知识，对用户作答给出补充评价和建议（不直接给分数）。
            知识库参考：
            {context[:1500]}
            重点关注：
            1. 是否清晰分析了文件自动创建/关闭对爬虫稳定性的影响（资源管理、句柄释放等）
            2. 是否说明白了对开发效率的提升（聚焦业务逻辑、语法简洁等）
            3. 逻辑是否清晰，是否有通俗类比（如将自动关闭比作"自动关灯"等生活化比喻）
            不要苛求底层技术深度，重点看与数据爬取场景的贴合度和实用分析。
            用户答案：{user_answer}
            """},
        ])
        # Small AI-driven adjustment layered on the structured score.
        ai_score_adjustment = 0
        if '举例' in evaluation or '创新' in evaluation or '类比' in evaluation:
            ai_score_adjustment += 0.3
        if '不够详细' in evaluation or '比较简单' in evaluation:
            ai_score_adjustment -= 0.3
        if '逻辑混乱' in evaluation or '内容偏离' in evaluation:
            ai_score_adjustment -= 0.5
        # 80% structured score + 20% (structured + adjustment), clamped to [0, 10].
        final_score = max(0, min(10, auto_score * 0.8 + (auto_score + ai_score_adjustment) * 0.2))

        self.total_score += final_score
        self.question_scores.append(final_score)

        print(f"\n{auto_feedback}")
        print(f"AI补充评价: {evaluation.strip()[:120]}...")
        print(f"\n评分: {final_score:.1f}/10")
        print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")

        feedback = f"{auto_feedback}\nAI补充: {evaluation.strip()}"
        messages.append({"role": "assistant", "content": f"{final_score:.1f}|{feedback}|结构化打分+AI补充评价"})

    def _record_zero_score(self, user_answer: str, messages: List[Dict],
                           detect_msg: str, feedback_msg: str, log_entry: str):
        """Print, record and log a zero-score verdict (shared by all gates)."""
        print(f"\n用户回答: {user_answer}")
        print(detect_msg)
        self.total_score += 0
        self.question_scores.append(0)
        print(f"\n评分: 0.0/10")
        print(feedback_msg)
        print(f"累计得分: {self.total_score:.1f}/{self.max_possible_score}")
        messages.append({"role": "user", "content": user_answer})
        messages.append({"role": "assistant", "content": log_entry})

    def _auto_structured_score(self, user_answer: str) -> Tuple[float, str]:
        """Rule-based scoring over five axes; returns (score, feedback line)."""
        score_struct = {
            "dimension": 0,    # dimension coverage
            "keywords": 0,     # key-content coverage
            "length": 0,       # length / level of detail
            "logic": 0,        # structure / logical flow
            "innovation": 0,   # originality / examples / extensions
        }
        max_score_struct = {
            "dimension": 2.5,
            "keywords": 3.0,
            "length": 1.5,
            "logic": 2.0,
            "innovation": 1.0,
        }

        # Dimension coverage: technical implementation and user experience.
        tech_implementation_indicators = {'技术', '实现', '机制', '原理', '代码', '语法', '资源', '句柄', '泄漏'}
        user_experience_indicators = {'用户', '体验', '开发', '效率', '简洁', '方便', '安全', '稳定'}
        has_tech = any(indicator in user_answer for indicator in tech_implementation_indicators)
        has_ux = any(indicator in user_answer for indicator in user_experience_indicators)
        covered_dimensions = sum([has_tech, has_ux])
        score_struct["dimension"] = max_score_struct["dimension"] * (covered_dimensions / 2)  # both covered = full marks

        # Keyword coverage: crawling/file-operation vocabulary; >=3 hits = full marks.
        crawl_keywords = {'文件句柄', '资源泄漏', 'with...as...', '自动关闭', '自动管理', '语法简洁',
                          '用户体验', '开发聚焦', '数据安全', '爬虫稳定性', '资源管理', '句柄释放'}
        kw_hits = sum(1 for k in crawl_keywords if k in user_answer)
        score_struct["keywords"] = max_score_struct["keywords"] * min(1, kw_hits / 3)

        # Length tiers.
        length = len(user_answer)
        if length > 150:
            score_struct["length"] = max_score_struct["length"]
        elif length > 80:
            score_struct["length"] = max_score_struct["length"] * 0.7
        elif length > 40:
            score_struct["length"] = max_score_struct["length"] * 0.4

        # Structure: bulleted lists beat plain multi-line, which beats one line.
        lines = [l for l in user_answer.split('\n') if l.strip()]
        bullet = sum(1 for l in lines if l.strip().startswith(('-', '①', '1.', '一、')))
        score_struct["logic"] = max_score_struct["logic"] * (0.7 if bullet > 0 else 0.4 if len(lines) > 1 else 0.2)

        # Originality: reward concrete examples and everyday analogies.
        if any(x in user_answer for x in ['比如', '例如', '案例', '举例', '此外', '进一步', '比作', '像']):
            score_struct["innovation"] = max_score_struct["innovation"]

        auto_score = sum(score_struct.values())
        auto_feedback = f"结构化打分: 维度覆盖{score_struct['dimension']:.1f}/{max_score_struct['dimension']}, " \
                        f"关键词覆盖{score_struct['keywords']:.1f}/{max_score_struct['keywords']}, " \
                        f"长度{score_struct['length']:.1f}/{max_score_struct['length']}, " \
                        f"条理性{score_struct['logic']:.1f}/{max_score_struct['logic']}, " \
                        f"创新性{score_struct['innovation']:.1f}/{max_score_struct['innovation']}."
        return auto_score, auto_feedback

    def _ask_follow_up(self, messages: List[Dict]) -> None:
        """Placeholder for follow-up questioning; intentionally a no-op."""
        pass

    def _show_final_score(self):
        print("\n" + "=" * 50)
        if self.question_count > 0:
            percentage = (self.total_score / self.max_possible_score) * 100 if self.max_possible_score > 0 else 0

            print(f"\n考试结束！共完成 {self.question_count} 道题")
            print("\n各题得分明细:")
            for i, score in enumerate(self.question_scores, 1):
                print(f"第{i}题: {score:.1f}/10")

            print(f"\n最终得分: {self.total_score:.1f}/{self.max_possible_score} ({percentage:.1f}%)")

            if percentage >= 90:
                print("\n成绩卓越！全面掌握了数据爬取中文件操作机制的知识。")
            elif percentage >= 75:
                print("\n成绩优秀！对数据爬取中文件操作机制有深入理解。")
            elif percentage >= 60:
                print("\n成绩良好！掌握了文件操作主要内容，细节有待加强。")
            elif percentage >= 40:
                print("\n成绩及格！需要加强文件操作机制的学习。")
            else:
                print("\n未达标！建议系统复习数据爬取中的文件操作知识。")

            print("\n知识点掌握情况:")
            correct_counts = sum(1 for score in self.question_scores if score >= 6)
            print(f"- 掌握较好的知识点: {correct_counts}/{self.question_count}")
            print(f"- 需要加强的知识点: {self.question_count - correct_counts}/{self.question_count}")
        else:
            print("本次未完成任何题目。")
        print("=" * 50 + "\n")

if __name__ == "__main__":
    # SECURITY FIX: the API key was hard-coded in source (and therefore leaked
    # with every copy of the file). Read it from the environment instead; the
    # previously committed key should be revoked.
    CONFIG = {
        "api_key": os.environ.get("DASHSCOPE_API_KEY", ""),
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "file_path": r"D:\桌面\竞赛\挑战杯+人工智能\第3章 数据获取.docx",  # replace with the real knowledge file
        "max_questions": 5
    }
    try:
        if not CONFIG["api_key"]:
            raise Exception("未设置API密钥：请先设置环境变量 DASHSCOPE_API_KEY")
        exam_bot = SmartExamBot(
            api_key=CONFIG["api_key"],
            base_url=CONFIG["base_url"],
            max_questions=CONFIG["max_questions"]
        )
        knowledge_content = exam_bot.read_file_content(CONFIG["file_path"])
        print(f"成功加载知识内容，长度: {len(knowledge_content)} 字符")
        print("提取的关键词:", ', '.join(exam_bot._extract_keywords(knowledge_content)))
        print("知识内容预览:", knowledge_content[:200] + "...")
        exam_bot.start_exam(knowledge_content)
    except Exception as e:
        print(f"程序运行出错: {str(e)}")
    finally:
        # Keep the console window open when run by double-click.
        input("按回车键退出...")