import os
import json
import re
import jieba
import numpy as np
from docx import Document
import requests
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from collections import defaultdict
from typing import List, Dict, Tuple

# Configuration - supports multiple knowledge-base files
KNOWLEDGE_PATHS = [r"D:\桌面\竞赛\挑战杯+人工智能\第3章 数据获取.docx"]  # raw string avoids backslash-escape issues in the Windows path
API_URL = "https://api.deepseek.com/v1/chat/completions"
# SECURITY: hard-coded API key committed to source — rotate it and load it from
# an environment variable or config file instead of keeping it in the code.
API_KEY = "sk-faa84a6e39ed4a78a90b49b8fb811bfc"
MAX_ATTEMPTS = 5  # max retries when generating a single question
BANNED_WORDS = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]  # words a generated question must not contain (figure / source-code references)
INVALID_ANSWERS = ["不会", "没学", "乱答", "不知道", "跳过", "忘了"]  # give-up phrases that mark an answer as invalid (scores 0)


class KnowledgeBaseProcessor:
    """Knowledge-base loader with TF-IDF vector cache - supports multiple files.

    Reads .docx / .txt files, fits a TF-IDF model over them using
    jieba-based Chinese tokenization, and extracts a ranked keyword list
    used downstream for question generation and answer scoring.
    """

    def __init__(self, file_paths: List[str]):
        self.file_paths = file_paths
        # Patch old jieba versions BEFORE the tokenizer is handed to the vectorizer.
        self._ensure_jieba_compatibility()
        self.vectorizer = TfidfVectorizer(tokenizer=self._tokenize, max_features=1000)
        self.doc_vectors = None  # TF-IDF matrix; filled by _load_and_process
        self.keywords = None  # ranked keyword list; filled by _load_and_process
        self._load_and_process()

    def _ensure_jieba_compatibility(self):
        """Ensure jieba.lcut exists (very old releases only provide cut())."""
        if not hasattr(jieba, 'lcut'):
            # Backfill lcut as list(cut(...)) for old jieba versions.
            jieba.lcut = lambda text: list(jieba.cut(text))
            print("使用 jieba 兼容模式")

    def _tokenize(self, text: str) -> List[str]:
        """Segment Chinese text with jieba and drop noise tokens.

        Filters out single characters, pure digits, pure punctuation and
        whitespace-only tokens.
        """
        try:
            # Prefer the list-returning API.
            words = jieba.lcut(text)
        except Exception:
            # Fall back to the generator API if lcut is unavailable/broken.
            words = list(jieba.cut(text))

        # Enhanced filtering: remove digits, punctuation and single characters.
        return [word for word in words
                if len(word) > 1
                and not re.match(r'^\d+$', word)  # filter pure digits
                and not re.match(r'^\W+$', word)  # filter pure punctuation
                and word.strip()]  # ensure the token is not blank

    def _load_and_process(self):
        """Load every configured file into self.documents, then compute TF-IDF keywords.

        Missing files and unsupported formats are skipped with a printed warning.
        If TF-IDF extraction fails for any reason, falls back to a plain
        frequency count over the tokenized corpus.

        Raises:
            ValueError: if no document content could be loaded at all.
        """
        self.documents = []

        # Read the content of every configured knowledge-base file.
        for file_path in self.file_paths:
            # Normalize separators for the current OS.
            normalized_path = os.path.normpath(file_path)
            if os.path.exists(normalized_path):
                try:
                    # .docx: join all non-empty paragraphs into one document.
                    if normalized_path.lower().endswith('.docx'):
                        doc = Document(normalized_path)
                        content = "\n".join([para.text for para in doc.paragraphs if para.text.strip()])
                        # The whole file is treated as a single document.
                        if content.strip():
                            self.documents.append(content.strip())
                            print(f"已加载 DOCX 文件: {normalized_path}")
                    # .txt: read as UTF-8, silently dropping undecodable bytes.
                    elif normalized_path.lower().endswith('.txt'):
                        with open(normalized_path, 'r', encoding='utf-8', errors='ignore') as f:
                            content = f.read()
                            if content.strip():
                                self.documents.append(content.strip())
                                print(f"已加载 TXT 文件: {normalized_path}")
                    else:
                        print(f"警告: 不支持的文件格式 {normalized_path}，已跳过")
                except Exception as e:
                    # Best-effort loading: report and continue with other files.
                    print(f"处理文件 {normalized_path} 时出错: {str(e)}")
            else:
                print(f"警告: 知识库文件 {normalized_path} 不存在，已跳过")

        if not self.documents:
            raise ValueError("未加载到任何知识库内容，请检查文件路径")

        print(f"成功加载 {len(self.documents)} 个文档，总字符数: {sum(len(doc) for doc in self.documents)}")

        try:
            # Fit the TF-IDF model over all loaded documents.
            self.doc_vectors = self.vectorizer.fit_transform(self.documents)

            # Rank terms by aggregate TF-IDF score across documents.
            feature_names = self.vectorizer.get_feature_names_out()

            # Guard against an empty vocabulary.
            if len(feature_names) == 0:
                raise ValueError("无法提取任何关键词 - 请检查文档内容")

            tfidf_scores = np.asarray(self.doc_vectors.sum(axis=0)).ravel()
            sorted_indices = np.argsort(tfidf_scores)[::-1]

            # Keep the top ~10% of terms (but at least 50) as keywords.
            top_n = max(50, len(sorted_indices) // 10)
            self.keywords = [feature_names[i] for i in sorted_indices[:top_n]]

            print(f"成功提取 {len(self.keywords)} 个关键词")

        except Exception as e:  # deliberate catch-all: any TF-IDF failure triggers the fallback below
            print(f"TF-IDF处理错误: {str(e)}")
            # Fallback: plain word-frequency count over the tokenized corpus.
            all_text = " ".join(self.documents)
            words = self._tokenize(all_text)
            word_freq = defaultdict(int)
            for word in words:
                word_freq[word] += 1

            # Take the 50 most frequent tokens as keywords.
            sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
            self.keywords = [word for word, freq in sorted_words[:50]]
            print(f"使用备选方法提取 {len(self.keywords)} 个关键词")

    def get_keywords(self) -> List[str]:
        """Return the extracted keyword list."""
        return self.keywords


class ExamSystem:
    """Core exam workflow.

    Generates questions via the DeepSeek API (with banned-word, similarity
    and retry checks), scores free-text answers by keyword overlap with the
    knowledge base, and produces grading feedback.
    """

    def __init__(self, kb_processor: KnowledgeBaseProcessor):
        self.kb = kb_processor
        self.generated_questions = []  # questions asked so far, used for similarity dedup
        self.backup_questions = self._generate_backup_questions()
        self.current_score = 0
        self.question_count = 0

    def _generate_backup_questions(self) -> List[str]:
        """Build a fallback question bank.

        Tries the API first; on any failure returns locally built
        "explain the concept" questions from the top keywords.
        """
        try:
            # FIX: join the document texts instead of interpolating the raw
            # Python list repr (the original embedded "['...']" in the prompt).
            context = "\n".join(self.kb.documents[:3])
            prompt = f"基于以下知识库内容生成5个考试题目:\n{context}\n要求: 每个题目必须包含至少一个关键词({', '.join(self.kb.keywords[:10])})"
            response = self._call_deepseek(prompt)
            questions = [q.strip() for q in response.split('\n') if q.strip()][:5]
            if questions:
                return questions
        except Exception as e:
            print(f"生成保底题库失败: {str(e)}")

        # API failed: locally generated fallback questions.
        return [
            f"请解释{kw}的概念"
            for kw in self.kb.keywords[:5]
        ]

    def _call_deepseek(self, prompt: str) -> str:
        """Call the DeepSeek chat-completion API and return the reply text.

        Returns "" on any request or parse failure so callers can retry.
        """
        headers = {
            "Authorization": f"Bearer {API_KEY}",
            "Content-Type": "application/json"
        }
        payload = {
            "model": "deepseek-chat",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 1000,
            "temperature": 0.7
        }
        try:
            response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
            response.raise_for_status()  # surface HTTP-level errors

            json_response = response.json()

            # Debug aid: dump the raw API response.
            print("API响应:", json.dumps(json_response, indent=2, ensure_ascii=False))

            if 'choices' in json_response and json_response['choices']:
                return json_response['choices'][0]['message']['content']
            else:
                print(f"API响应格式异常: 缺少choices字段")
                return ""
        except requests.exceptions.RequestException as e:
            print(f"API请求失败: {str(e)}")
            return ""
        except (KeyError, ValueError) as e:
            print(f"API响应解析失败: {str(e)}")
            return ""
        except Exception as e:
            print(f"API调用未知错误: {str(e)}")
            return ""

    def _contains_banned_words(self, question: str) -> bool:
        """Return True if the question contains any banned word.

        FIX: uses plain substring matching. The original \\b word-boundary
        regex never matched banned words embedded in Chinese text, because
        CJK characters count as \\w in Python's re module, so no boundary
        exists between a banned word and its Chinese surroundings.
        """
        return any(word in question for word in BANNED_WORDS)

    def _calculate_similarity(self, new_question: str) -> float:
        """Return the maximum cosine similarity between the new question and
        all previously generated ones (0.0 when none exist or on error)."""
        if not self.generated_questions:
            return 0.0

        try:
            new_vec = self.kb.vectorizer.transform([new_question])
            existing_vecs = self.kb.vectorizer.transform(self.generated_questions)
            similarities = cosine_similarity(new_vec, existing_vecs)
            return similarities.max()
        except Exception as e:
            print(f"相似度计算错误: {str(e)}")
            return 0.0  # treat errors as "not similar" so generation can proceed

    def generate_question(self) -> str:
        """Generate a valid question, retrying up to MAX_ATTEMPTS, then
        falling back to the backup bank and finally a local template."""
        attempts = 0
        while attempts < MAX_ATTEMPTS:
            num_keywords = min(3, len(self.kb.keywords))
            if num_keywords == 0:
                # No keywords at all: use a generic default question.
                return "请解释知识库中的核心概念"

            keywords = np.random.choice(self.kb.keywords, num_keywords, replace=False)
            prompt = (
                f"基于知识库生成一个考试题目，要求:"
                f"\n1. 必须包含关键词: {', '.join(keywords)}"
                f"\n2. 禁止包含以下词语: {', '.join(BANNED_WORDS)}"
                f"\n3. 题目形式为问答题"
                f"\n输出格式: 直接输出题目内容"
            )

            question = self._call_deepseek(prompt).strip()

            # Validate the generated question.
            if not question:
                attempts += 1
                print(f"题目生成失败: 空题目 (尝试 {attempts}/{MAX_ATTEMPTS})")
                continue

            if self._contains_banned_words(question):
                attempts += 1
                print(f"题目生成失败: 包含禁止词 (尝试 {attempts}/{MAX_ATTEMPTS})")
                continue

            similarity = self._calculate_similarity(question)
            if similarity > 0.7:
                attempts += 1
                print(f"题目生成失败: 相似度过高 ({similarity:.2f}) (尝试 {attempts}/{MAX_ATTEMPTS})")
                continue

            self.generated_questions.append(question)
            return question

        # All attempts exhausted: use the backup question bank.
        if self.backup_questions:
            backup = self.backup_questions.pop(0)
            self.generated_questions.append(backup)
            print("使用保底题库生成题目")
            return backup
        else:
            # Last-resort local template. FIX: the original unconditionally
            # indexed keywords[1] after taking min(2, len) keywords, raising
            # IndexError when exactly one keyword exists; handle the 2 / 1 / 0
            # keyword cases separately.
            if len(self.kb.keywords) >= 2:
                keywords = np.random.choice(self.kb.keywords, 2, replace=False)
                question = f"请解释'{keywords[0]}'和'{keywords[1]}'之间的关系"
            elif len(self.kb.keywords) == 1:
                question = f"请解释{self.kb.keywords[0]}的概念"
            else:
                question = "请解释知识库中的核心概念"

            self.generated_questions.append(question)
            print("使用最终保底方案生成题目")
            return question

    def evaluate_answer(self, question: str, answer: str) -> Tuple[int, str]:
        """Score an answer (0-10) by keyword overlap and return (score, feedback).

        Answers that are too short (<10 chars) or contain give-up phrases
        score 0 immediately.
        """
        clean_answer = answer.replace('\\', '')  # strip stray escape characters
        if any(inv in clean_answer for inv in INVALID_ANSWERS) or len(clean_answer) < 10:
            return 0, "检测到无效回答，得0分"

        q_keywords = self._extract_keywords(question)
        a_keywords = self._extract_keywords(clean_answer)

        # 3 points per matched core keyword (first 3 of the question)...
        core_score = sum(3 for kw in q_keywords[:3] if kw in a_keywords) if q_keywords else 0
        # ...plus 1 point per matched extended keyword (next 3).
        extended_score = sum(1 for kw in q_keywords[3:6] if kw in a_keywords) if q_keywords else 0

        total_score = min(core_score + extended_score, 10)  # per-question cap: 10

        feedback = self._generate_feedback(question, clean_answer, total_score, core_score, extended_score)
        return total_score, feedback

    def _extract_keywords(self, text: str) -> List[str]:
        """Tokenize text and keep only tokens that are knowledge-base keywords."""
        # Build the lookup set once: membership tests against the keyword
        # LIST were O(len(keywords)) per token in the original.
        keyword_set = set(self.kb.keywords)
        words = self.kb._tokenize(text)
        return [word for word in words if word in keyword_set]

    def _generate_feedback(self, question: str, answer: str, score: int, core: int, ext: int) -> str:
        """Produce grading feedback via the API, with a local fallback."""
        try:
            prompt = (
                f"作为考官，请对以下答题进行结构化反馈:\n"
                f"问题: {question}\n"
                f"回答: {answer}\n"
                f"评分结果: 总分{score}/10 (核心分:{core}, 扩展分:{ext})\n"
                f"反馈要求: 1) 指出回答中的优点 2) 说明扣分原因 3) 给出改进建议"
            )
            feedback = self._call_deepseek(prompt)
            if not feedback:
                raise ValueError("API返回空反馈")
            return feedback
        except Exception:  # FIX: narrowed from a bare except, which also swallowed SystemExit/KeyboardInterrupt
            # API failed: build local feedback from keyword overlap.
            q_keywords = self._extract_keywords(question)
            a_keywords = self._extract_keywords(answer)
            matched = len(set(q_keywords) & set(a_keywords))
            total = len(q_keywords)

            feedback_lines = [
                f"您的回答获得了 {score}/10 分。",
                f"优点: 提到了部分相关概念",
                f"不足: 未完整覆盖问题的核心要点 (匹配关键词: {matched}/{total})",
                f"建议: 更详细地解释关键概念并提供具体例子"
            ]
            return "\n".join(feedback_lines)

    def start_exam(self):
        """Run the interactive 5-question exam loop on stdin/stdout."""
        print("考试开始! 共5题，每题10分，满分50分")
        self.current_score = 0

        for i in range(5):
            self.question_count += 1
            print(f"\n--- 第{i + 1}题 ---")
            question = self.generate_question()
            print(f"问题: {question}")

            print("请输入答案 (支持多行输入，单独一行输入'END'结束):")
            user_input = []
            while True:
                try:
                    line = input()
                    if line.strip().replace('\\', '') == 'END':  # tolerate stray escapes around the sentinel
                        break
                    user_input.append(line)
                except EOFError:
                    print("\n检测到输入结束，终止输入")
                    break
            answer = '\n'.join(user_input)

            score, feedback = self.evaluate_answer(question, answer)
            self.current_score += score

            print(f"\n评分结果: {score}/10")
            print(f"详细反馈: {feedback}")

        print(f"\n考试结束! 最终得分: {self.current_score}/50")


if __name__ == "__main__":
    # Initialize the knowledge base from the configured file paths.
    try:
        print("正在初始化知识库...")
        kb_processor = KnowledgeBaseProcessor(KNOWLEDGE_PATHS)
        print(f"知识库加载完成，共加载 {len(kb_processor.documents)} 个文档")
        print(f"提取关键词: {', '.join(kb_processor.keywords[:10])}等")
    except Exception as e:
        print(f"初始化失败: {str(e)}")
        # FIX: raise SystemExit instead of calling the site-injected exit()
        # helper, which is not guaranteed to exist (e.g. under `python -S`
        # or in frozen/embedded interpreters).
        raise SystemExit(1)

    # Run the interactive exam.
    exam_system = ExamSystem(kb_processor)
    exam_system.start_exam()