import os
import json
import re
import jieba
import numpy as np
from docx import Document
import requests
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from collections import defaultdict
from typing import List, Dict, Tuple

# --- Configuration (supports multiple knowledge-base files) ---
KNOWLEDGE_PATHS = [r"D:\桌面\竞赛\挑战杯+人工智能\第3章 数据获取.docx"]  # .docx / .txt supported
API_URL = "https://api.deepseek.com/v1/chat/completions"
# SECURITY: never commit a live API key to source control.  The key is now read
# from the DEEPSEEK_API_KEY environment variable; the previous hard-coded value
# remains only as a backward-compatible fallback and should be revoked/rotated.
API_KEY = os.environ.get("DEEPSEEK_API_KEY", "sk-faa84a6e39ed4a78a90b49b8fb811bfc")
MAX_ATTEMPTS = 5  # max API attempts when generating a single question
# Words that must never appear in a generated question (they imply figures /
# source listings the candidate cannot see).
BANNED_WORDS = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]
# Substrings that mark an answer as a non-attempt (scored 0 immediately).
INVALID_ANSWERS = ["不会", "没学", "乱答", "不知道", "跳过", "忘了"]


class KnowledgeBaseProcessor:
    """Load knowledge-base files (.docx / .txt), fit TF-IDF and extract keywords.

    Attributes:
        documents: raw text of each successfully loaded file (one entry per file).
        doc_vectors: sparse TF-IDF matrix over ``documents`` (None until built).
        keywords: ranked keyword list; guaranteed non-empty after construction
            as long as the documents contain any tokenizable words.
    """

    def __init__(self, file_paths: List[str]):
        self.file_paths = file_paths
        # token_pattern=None silences the sklearn warning that the default
        # pattern is ignored when a custom tokenizer is supplied.
        self.vectorizer = TfidfVectorizer(tokenizer=self._tokenize,
                                          token_pattern=None, max_features=1000)
        self.doc_vectors = None
        self.keywords = None
        self._load_and_process()

    def _tokenize(self, text: str) -> List[str]:
        """Segment Chinese text, dropping single characters and punctuation-only
        tokens.  Compatible with jieba builds that lack ``lcut``."""
        try:
            words = jieba.lcut(text)
        except AttributeError:
            # Older jieba: cut() returns a generator.
            words = list(jieba.cut(text))
        return [word for word in words if len(word) > 1 and not re.match(r'^\W+$', word)]

    def _load_and_process(self):
        """Read every knowledge-base file, then fit TF-IDF and extract keywords.

        Raises:
            ValueError: if no file could be loaded at all.
        """
        self.documents = []

        for file_path in self.file_paths:
            if not os.path.exists(file_path):
                print(f"警告: 知识库文件 {file_path} 不存在，已跳过")
                continue
            try:
                # Case-insensitive extension check (".DOCX" etc. also accepted).
                suffix = os.path.splitext(file_path)[1].lower()
                if suffix == '.docx':
                    doc = Document(file_path)
                    # Treat the whole document as a single TF-IDF document
                    # rather than splitting it into paragraphs/lines.
                    self.documents.append("\n".join(para.text for para in doc.paragraphs))
                elif suffix == '.txt':
                    try:
                        with open(file_path, 'r', encoding='utf-8') as f:
                            self.documents.append(f.read())
                    except UnicodeDecodeError:
                        # Fall back to the common legacy Chinese encoding.
                        with open(file_path, 'r', encoding='gbk', errors='replace') as f:
                            self.documents.append(f.read())
                else:
                    print(f"警告: 不支持的文件格式 {file_path}，已跳过")
            except Exception as e:
                print(f"处理文件 {file_path} 时出错: {e}")

        if not self.documents:
            raise ValueError("未加载到任何知识库内容，请检查文件路径")

        try:
            self.doc_vectors = self.vectorizer.fit_transform(self.documents)

            feature_names = self.vectorizer.get_feature_names_out()
            if len(feature_names) == 0:
                raise ValueError("无法提取任何关键词 - 请检查文档内容")

            # Rank terms by their summed TF-IDF mass across all documents.
            tfidf_scores = np.asarray(self.doc_vectors.sum(axis=0)).ravel()
            sorted_indices = np.argsort(tfidf_scores)[::-1]

            # Keep roughly the top 10% (at least 50), excluding terms whose
            # score is too close to the maximum (overly frequent, low value).
            top_n = max(50, len(sorted_indices) // 10)
            self.keywords = [feature_names[i] for i in sorted_indices[:top_n]
                             if tfidf_scores[i] < 0.5 * tfidf_scores.max()]

            # BUGFIX: the filter above can leave the list empty (e.g. when all
            # scores are near-uniform), which would crash downstream keyword
            # sampling.  Fall back to the unfiltered top terms in that case.
            if not self.keywords:
                self.keywords = [feature_names[i] for i in sorted_indices[:top_n]]

            print(f"成功提取 {len(self.keywords)} 个关键词")

        except ValueError as ve:
            print(f"TF-IDF处理错误: {ve}")
            # Fallback: simple frequency count over all documents.  Reuses
            # _tokenize instead of duplicating the jieba-compat logic.
            word_freq = defaultdict(int)
            for word in self._tokenize(" ".join(self.documents)):
                word_freq[word] += 1
            sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
            self.keywords = [word for word, freq in sorted_words[:50]]
            print(f"使用备选方法提取 {len(self.keywords)} 个关键词")

    def get_keywords(self) -> List[str]:
        """Return the keyword list extracted at construction time."""
        return self.keywords


class ExamSystem:
    """Core exam workflow: question generation, answer scoring and feedback.

    NOTE(review): the original class defined ``_generate_backup_questions``
    and ``_generate_feedback`` TWICE each; Python silently keeps only the
    last definition, which had dropped the local (no-API) feedback fallback.
    The duplicates are merged here so every API call degrades gracefully.
    """

    def __init__(self, kb_processor: "KnowledgeBaseProcessor"):
        self.kb = kb_processor
        self.generated_questions = []  # every question asked so far (dedup basis)
        self.backup_questions = self._generate_backup_questions()
        self.current_score = 0
        self.question_count = 0

    def _call_deepseek(self, prompt: str) -> str:
        """Call the DeepSeek chat API; return "" on any failure (never raises)."""
        headers = {"Authorization": f"Bearer {API_KEY}"}
        payload = {
            "model": "deepseek-v3",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 1000,
            "temperature": 0.7
        }
        try:
            response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
            response_data = response.json()

            # Authentication errors get a dedicated message.
            if 'error' in response_data and response_data['error']['type'] == 'authentication_error':
                print(f"API认证失败: {response_data['error']['message']}")
                return ""

            # A well-formed reply must carry a non-empty 'choices' list.
            if 'choices' not in response_data or not response_data['choices']:
                print(f"API响应不包含有效choices字段: {response_data}")
                return ""

            return response_data['choices'][0]['message']['content']
        except Exception as e:
            # Network errors, timeouts, malformed JSON — all degrade to "".
            print(f"API调用失败: {e}")
            return ""

    def _generate_backup_questions(self) -> List[str]:
        """Build a local (API-free) pool of up to 5 fallback questions
        from pairs/triples of knowledge-base keywords."""
        import random  # local import: only used here

        print("使用本地保底题库生成器")
        keywords = self.kb.keywords
        base_questions = [
            f"请解释'{keywords[i]}'和'{keywords[i + 1]}'之间的关系"
            for i in range(0, min(10, len(keywords) - 1), 2)
        ]

        # Top up to 5 questions with random keyword triples when possible.
        while len(base_questions) < 5 and len(keywords) >= 3:
            kws = random.sample(keywords, 3)
            base_questions.append(f"请阐述'{kws[0]}'、'{kws[1]}'和'{kws[2]}'三者之间的联系")

        return base_questions[:5]

    def _contains_banned_words(self, question: str) -> bool:
        """True if the question mentions any banned word (figures, code, ...)."""
        return any(banned in question for banned in BANNED_WORDS)

    def _calculate_similarity(self, new_question: str) -> float:
        """Maximum cosine similarity between a candidate question and all
        previously generated ones (0.0 when none exist yet)."""
        if not self.generated_questions:
            return 0.0

        new_vec = self.kb.vectorizer.transform([new_question])
        existing_vecs = self.kb.vectorizer.transform(self.generated_questions)
        similarities = cosine_similarity(new_vec, existing_vecs)
        return similarities.max()

    def generate_question(self) -> str:
        """Generate one exam question.

        Tries the API up to MAX_ATTEMPTS times, rejecting empty output,
        banned words and near-duplicates (>0.7 similarity); then falls back
        to the backup pool, and finally to a keyword template.
        """
        attempts = 0
        while attempts < MAX_ATTEMPTS:
            # BUGFIX: np.random.choice(..., replace=False) raises when fewer
            # keywords exist than requested — clamp the sample size.
            sample_size = min(3, len(self.kb.keywords))
            keywords = np.random.choice(self.kb.keywords, sample_size, replace=False)
            prompt = (
                f"基于知识库生成一个考试题目，要求:"
                f"\n1. 必须包含关键词: {', '.join(keywords)}"
                f"\n2. 禁止包含以下词语: {', '.join(BANNED_WORDS)}"
                f"\n3. 题目形式为问答题"
                f"\n输出格式: 直接输出题目内容"
            )

            question = self._call_deepseek(prompt).strip()

            if not question:
                attempts += 1
                continue

            if self._contains_banned_words(question):
                attempts += 1
                continue

            if self._calculate_similarity(question) > 0.7:
                attempts += 1
                continue

            self.generated_questions.append(question)
            return question

        # API failed repeatedly: draw from the locally generated backup pool.
        if self.backup_questions:
            backup = self.backup_questions.pop(0)
            self.generated_questions.append(backup)
            return backup

        # Last-resort template (clamped sampling as above).
        sample_size = min(2, len(self.kb.keywords))
        keywords = list(np.random.choice(self.kb.keywords, sample_size, replace=False))
        while len(keywords) < 2:  # degenerate keyword list: repeat what we have
            keywords.append(keywords[0])
        return f"请解释'{keywords[0]}'和'{keywords[1]}'之间的关系"

    def evaluate_answer(self, question: str, answer: str) -> Tuple[int, str]:
        """Score an answer (0-10) by keyword overlap and return (score, feedback).

        Core keywords (first 3 of the question) are worth 3 points each,
        extended keywords (next 3) 1 point each; non-attempts score 0.
        """
        if any(inv in answer for inv in INVALID_ANSWERS) or len(answer) < 10:
            return 0, "检测到无效回答，得0分"

        q_keywords = self._extract_keywords(question)
        a_keywords = self._extract_keywords(answer)

        core_score = sum(3 for kw in q_keywords[:3] if kw in a_keywords) if q_keywords else 0
        extended_score = sum(1 for kw in q_keywords[3:6] if kw in a_keywords) if q_keywords else 0

        total_score = min(core_score + extended_score, 10)  # per-question cap: 10

        feedback = self._generate_feedback(question, answer, total_score, core_score, extended_score)
        return total_score, feedback

    def _extract_keywords(self, text: str) -> List[str]:
        """Tokenize text and keep only known knowledge-base keywords,
        in text order.  Compatible with jieba builds that lack ``lcut``."""
        try:
            words = jieba.lcut(text)
        except AttributeError:
            words = list(jieba.cut(text))
        known = set(self.kb.keywords)  # O(1) membership instead of list scans
        return [word for word in words if word in known]

    def _generate_feedback(self, question: str, answer: str, score: int, core: int, ext: int) -> str:
        """Structured grading feedback: API-generated, with a local fallback.

        BUGFIX: the duplicate definition that previously shadowed this method
        had lost the local fallback, so API failures produced empty feedback.
        """
        prompt = (
            f"作为考官，请对以下答题进行结构化反馈:\n"
            f"问题: {question}\n"
            f"回答: {answer}\n"
            f"评分结果: 总分{score}/10 (核心分:{core}, 扩展分:{ext})\n"
            f"反馈要求: 1) 指出回答中的优点 2) 说明扣分原因 3) 给出改进建议"
        )
        feedback = self._call_deepseek(prompt)

        if not feedback:
            # API unavailable: tiered canned feedback keyed on the score.
            feedback = f"评分结果: {score}/10\n"
            if score >= 8:
                feedback += "优点: 回答全面，涵盖了核心关键词。\n建议: 可以进一步扩展细节。"
            elif score >= 5:
                feedback += "优点: 回答包含部分核心关键词。\n扣分原因: 部分核心关键词未涉及。\n建议: 加强对核心概念的理解和阐述。"
            else:
                feedback += "扣分原因: 回答未能涵盖足够的核心关键词。\n建议: 仔细阅读问题，确保回答包含所有相关的核心概念。"

        return feedback

    def start_exam(self):
        """Interactive exam loop: 5 questions, 10 points each (50 total).
        Answers are multi-line, terminated by a lone 'END' line."""
        print("考试开始! 共5题，每题10分，满分50分")
        self.current_score = 0

        for i in range(5):
            self.question_count += 1
            print(f"\n--- 第{i + 1}题 ---")
            question = self.generate_question()
            print(f"问题: {question}")

            print("请输入答案 (支持多行输入，单独一行输入'END'结束):")
            user_input = []
            while True:
                line = input()
                if line.strip() == 'END':
                    break
                user_input.append(line)
            answer = '\n'.join(user_input)

            score, feedback = self.evaluate_answer(question, answer)
            self.current_score += score

            print(f"\n评分结果: {score}/10")
            print(f"详细反馈: {feedback}")

        print(f"\n考试结束! 最终得分: {self.current_score}/50")


if __name__ == "__main__":
    # Build the knowledge base first; the exam cannot run without keywords,
    # so any initialization failure aborts with a non-zero exit code.
    try:
        kb = KnowledgeBaseProcessor(KNOWLEDGE_PATHS)
        print(f"知识库加载完成，共加载 {len(kb.documents)} 个文档片段")
        print(f"提取关键词: {', '.join(kb.keywords[:10])}等")
    except Exception as e:
        print(f"初始化失败: {e}")
        exit(1)

    # Hand the loaded knowledge base to the exam engine and run it.
    ExamSystem(kb).start_exam()