import hashlib
import json
import os
import re
import time
from typing import Dict, List, Optional, Tuple

import jieba
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain.chains import LLMChain
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# Configuration parameters
VECTOR_STORE_PATH = "./vector_store"
CACHE_DIR = "./cache"
SIMILARITY_THRESHOLD = 0.7  # questions more similar than this are treated as duplicates
MAX_GENERATION_ATTEMPTS = 5  # max LLM generation attempts before using the fallback bank
TOTAL_QUESTIONS = 5  # number of questions per exam
TOTAL_SCORE = 50  # maximum total score
KEYWORD_WEIGHT = {
    "core": 0.6,  # weight of core keywords
    "extended": 0.3  # weight of extended keywords
}
# NOTE(review): KEYWORD_WEIGHT is not referenced anywhere in this file — confirm it is used elsewhere.

# Ensure the cache directory exists up front
os.makedirs(CACHE_DIR, exist_ok=True)


# 1. Knowledge base processing module
class KnowledgeBaseProcessor:
    """Loads course documents, builds/loads a FAISS vector store, and extracts
    TF-IDF keywords used downstream for question generation and scoring.
    """

    # NOTE(review): the default points at a single .docx file, but
    # load_documents() uses DirectoryLoader with glob="*.txt", which expects a
    # *directory* of .txt files — confirm the intended knowledge-base location.
    # The raw string avoids the unrecognized "\桌" escape of the original
    # literal (same runtime value, no DeprecationWarning).
    def __init__(self, knowledge_dir: str = r"D:\桌面\第3章 数据获取.docx"):
        self.knowledge_dir = knowledge_dir  # directory scanned for *.txt files
        self.vector_store = None            # FAISS index, built lazily
        self.documents = None               # loaded langchain Documents
        self.keywords = None                # {"core": [...], "extended": [...]}
        # TF-IDF over jieba tokens so Chinese text is segmented properly.
        self.tfidf_vectorizer = TfidfVectorizer(tokenizer=self._jieba_tokenize)

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Segment Chinese text into a token list with jieba."""
        return list(jieba.cut(text))

    def load_documents(self) -> None:
        """Load all *.txt documents (UTF-8) from the knowledge directory."""
        loader = DirectoryLoader(
            self.knowledge_dir,
            glob="*.txt",
            loader_cls=TextLoader,
            loader_kwargs={"encoding": "utf-8"}
        )
        self.documents = loader.load()
        print(f"加载了 {len(self.documents)} 个文档")

    def split_documents(self) -> List:
        """Split loaded documents into overlapping chunks for embedding."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Load the persisted FAISS store if present, otherwise build and save it."""
        # Hoisted: the original constructed HuggingFaceEmbeddings separately in
        # each branch with identical arguments.
        embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        if os.path.exists(VECTOR_STORE_PATH):
            # Load the existing vector store from disk.
            self.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH,
                embeddings,
                allow_dangerous_deserialization=True
            )
            print("加载已存在的向量存储")
        else:
            # Build a new vector store from document chunks and persist it.
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, embeddings)
            self.vector_store.save_local(VECTOR_STORE_PATH)
            print("创建新的向量存储")

    def extract_keywords(self) -> None:
        """Rank corpus terms by their summed TF-IDF weight and keep the top ones
        as "core" (top 20) and "extended" (next 80) keyword lists."""
        if self.documents is None:
            self.load_documents()

        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)

        feature_names = self.tfidf_vectorizer.get_feature_names_out()
        # Sum each term's weight across all documents with one sparse column
        # sum (the original densified every row and accumulated per cell,
        # O(docs * vocab) in time and memory).
        total_weights = np.asarray(tfidf_matrix.sum(axis=0)).ravel()

        # Keep only terms that actually occur (matches the original `weight > 0`
        # filter), sorted by descending total weight.
        sorted_keywords = sorted(
            ((kw, w) for kw, w in zip(feature_names, total_weights) if w > 0),
            key=lambda item: item[1],
            reverse=True,
        )
        self.keywords = {
            "core": [kw for kw, _ in sorted_keywords[:20]],  # core keywords
            "extended": [kw for kw, _ in sorted_keywords[20:100]]  # extended keywords
        }
        print(f"提取到 {len(self.keywords['core'])} 个核心关键词和 {len(self.keywords['extended'])} 个扩展关键词")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to `query`, building the
        vector store on first use."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)


# 2. Question generation module
class QuestionGenerator:
    """Generates exam questions with an LLM, filtering out questions that
    reference unavailable material (figures, code listings) and rejecting
    near-duplicates via TF-IDF cosine similarity. Falls back to a pre-built
    question bank when generation repeatedly fails.
    """

    # Words that may not appear in a question: they refer to figures or code
    # listings the candidate cannot see in a text-only exam. Shared by the
    # generation prompt and the post-check (the original duplicated this list
    # in two places).
    FORBIDDEN_WORDS = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]

    def __init__(self, knowledge_processor: "KnowledgeBaseProcessor"):
        self.kb_processor = knowledge_processor
        self.llm = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com/v1",
            model_name="deepseek-v3"
        )
        self.generated_questions: List[str] = []
        # BUG FIX: the original wrote `self保底题库` (missing dot), which only
        # created a throwaway local name, so reading `self.保底题库` later
        # raised AttributeError. Same bug existed for 保底题生成_prompt below.
        self.保底题库: List[str] = []  # fallback question bank
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the prompt templates for normal and fallback generation."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords", "forbidden_words"],
            template="""基于以下上下文内容，生成一个关于"{keywords}"相关的问题，要求：
1. 问题必须与上下文内容紧密相关
2. 问题中不能包含以下词汇：{forbidden_words}
3. 问题应考察对核心概念的理解
4. 问题形式可以是选择题、简答题或判断题
5. 确保问题清晰明确，避免歧义

上下文：
{context}

生成的问题："""
        )

        # BUG FIX: original assigned `self保底题生成_prompt` (missing dot), so
        # the attribute read by _generate_base_questions was never created.
        self.保底题生成_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题，确保每个问题都包含至少一个关键词：
关键词：{keywords}
上下文：{context}
每个问题单独一行，不要编号："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True when `text` contains any forbidden word."""
        return any(word in text for word in self.FORBIDDEN_WORDS)

    def _calculate_similarity(self, question1: str, question2: str, vectorizer=None) -> float:
        """Cosine similarity between the two questions' TF-IDF vectors.

        BUG FIX: the original assigned the freshly built vectorizer to a
        misspelled name (`vectorsizer`) and then called fit_transform on the
        `vectorizer` parameter, which defaults to None — an AttributeError on
        every call. The default is now built only when none is supplied.
        """
        if vectorizer is None:
            vectorizer = TfidfVectorizer(tokenizer=self.kb_processor._jieba_tokenize)
        tfidf_matrix = vectorizer.fit_transform([question1, question2])
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def _generate_base_questions(self, keyword_str: Optional[str] = None):
        """Populate the fallback question bank once (no-op when already filled)."""
        if not self.保底题库:
            context = "\n\n".join(doc.page_content for doc in self.kb_processor.documents[:3])
            # BUG FIX: the original computed `keywords_str` from the core
            # keywords but then passed the unused `keyword_str` parameter
            # (None by default) into the chain.
            if keyword_str is None:
                keyword_str = ", ".join(self.kb_processor.keywords["core"][:10])

            chain = LLMChain(llm=self.llm, prompt=self.保底题生成_prompt)
            result = chain.run(context=context, keywords=keyword_str)
            self.保底题库 = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成了 {len(self.保底题库)} 道保底题目")

    def generate_question(self) -> str:
        """Generate one valid, non-duplicate question; fall back to the bank
        after MAX_GENERATION_ATTEMPTS failures."""
        attempts = 0

        # Make sure the fallback bank exists before attempting generation.
        self._generate_base_questions()

        while attempts < MAX_GENERATION_ATTEMPTS:
            try:
                # Sample 1-2 *distinct* keywords to steer the question topic
                # (replace=False avoids degenerate "X, X" prompts).
                keywords = np.random.choice(
                    self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"],
                    size=np.random.randint(1, 3),
                    replace=False
                )
                keyword_str = ", ".join(keywords)

                # Retrieve supporting context for the chosen keywords.
                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join(doc.page_content for doc in relevant_docs[:2])

                chain = LLMChain(llm=self.llm, prompt=self.question_prompt)
                question = chain.run(
                    context=context,
                    keywords=keyword_str,
                    forbidden_words=", ".join(self.FORBIDDEN_WORDS)
                ).strip()

                # Reject questions referencing unavailable material.
                if self._check_forbidden_words(question):
                    attempts += 1
                    continue

                # Reject questions too similar to anything already generated.
                similar = any(
                    self._calculate_similarity(question, q) > SIMILARITY_THRESHOLD
                    for q in self.generated_questions
                )
                if not similar:
                    self.generated_questions.append(question)
                    return question

            except Exception as e:
                # Best-effort: log and retry rather than abort the exam.
                print(f"题目生成出错: {str(e)}")

            attempts += 1
            time.sleep(1)

        # LLM generation kept failing — fall back to the prepared bank.
        print("使用保底题目")
        for q in self.保底题库:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q

        # Last resort: random pick (may repeat an earlier question).
        return np.random.choice(self.保底题库)


# 3. Exam interaction module
class ExamInteractionModule:
    """Drives the exam loop on the console: hands out questions, collects
    multi-line answers, and keeps per-question score/feedback records."""

    def __init__(self, question_generator: "QuestionGenerator"):
        self.question_generator = question_generator
        self.memory = ConversationBufferMemory(memory_key="chat_history")
        self.score = 0             # accumulated total score
        self.current_question = 0  # 1-based index of the question being asked
        self.answers = []          # per-question dicts: question/answer/score/feedback

    def start_exam(self) -> None:
        """Print the exam header."""
        print(f"欢迎参加考试！本次考试共 {TOTAL_QUESTIONS} 题，满分 {TOTAL_SCORE} 分。")
        print("请认真作答，每题完成后将自动评分并给出反馈。\n")

    def get_next_question(self) -> Optional[str]:
        """Return the next generated question, or None when the exam is over."""
        if self.current_question < TOTAL_QUESTIONS:
            self.current_question += 1
            return self.question_generator.generate_question()
        return None

    def collect_answer(self, question: str) -> str:
        """Prompt for a (possibly multi-line) answer, record it, and return it."""
        print(f"\n第 {self.current_question} 题: {question}")
        print("请输入答案（多行输入，输入空行结束）：")

        answer_lines = []
        while True:
            line = input()
            if not line:  # an empty line terminates the answer
                break
            answer_lines.append(line)

        user_answer = "\n".join(answer_lines)
        self.answers.append({
            "question": question,
            "answer": user_answer,
            "score": 0,      # filled in after scoring
            "feedback": ""
        })
        return user_answer

    def check_abnormal_answer(self, answer: str) -> bool:
        """Heuristically detect non-serious answers: give-up phrases, copying
        the question, or answers that are just a keyword or two.

        Cleanup: the original mixed strings and lambdas of two different
        arities in one list and relied on a bare `except:` to dispatch between
        call signatures; the same checks are now explicit and behave
        identically.
        """
        giveup_phrases = [
            "不会", "没学", "不知道", "不懂",
            "乱答", "随便写", "略", "同上",
        ]
        answer_lower = answer.lower()
        question = self.answers[-1]["question"].lower()

        # Give-up phrases anywhere in the answer.
        if any(phrase in answer_lower for phrase in giveup_phrases):
            return True
        # Answer copies the question (or vice versa).
        if answer_lower in question or question in answer_lower:
            return True
        # Answer too short to be a real attempt: under 5 chars, at most 2 words.
        if len(answer_lower) < 5 and len(answer_lower.split()) <= 2:
            return True
        return False

    def finish_exam(self) -> None:
        """Print the final score and the per-question breakdown."""
        print(f"\n考试结束！您的总分为：{self.score}/{TOTAL_SCORE}")
        print("\n答题详情：")
        for i, item in enumerate(self.answers, 1):
            print(f"\n第 {i} 题: {item['question']}")
            print(f"您的答案: {item['answer']}")
            print(f"得分: {item['score']}")
            print(f"反馈: {item['feedback']}")


# 4. Scoring and feedback module
class ScoringFeedbackModule:
    """Scores a user's answer with the LLM against retrieved knowledge-base
    context and parses the LLM output into (score, feedback)."""

    def __init__(self, knowledge_processor: "KnowledgeBaseProcessor"):
        self.kb_processor = knowledge_processor
        # SECURITY FIX: the original hard-coded an API key here (a leaked
        # credential); read it from the environment, consistent with
        # QuestionGenerator.
        self.llm = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com/v1",
            model_name="deepseek-v3"
        )
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the scoring prompt template."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points"],
            template="""请基于以下信息对用户的答案进行评分和反馈：

问题：{question}
用户答案：{user_answer}
参考上下文：{context}
核心关键词：{keywords}
满分分值：{total_points}

评分要求：
1. 准确判断答案的正确性和完整性
2. 核心关键词出现与否应作为重要评分依据
3. 给出具体得分（0-{total_points}之间）
4. 提供详细的反馈，包括正确答案要点和用户答案的优缺点
5. 评分结果格式：先输出分数，然后另起一行输出反馈内容

评分和反馈："""
        )

    @staticmethod
    def _parse_result(result: str, total_points: int) -> Tuple[int, str]:
        """Parse the LLM output ("score\\nfeedback...") into (score, feedback),
        clamping the score to [0, total_points].

        Robustness fix: accepts decorated score lines such as "85分" or
        "得分：85", which the original bare `int(line)` parse rejected; and the
        bare `except:` around the whole parse is gone.
        """
        lines = result.strip().split("\n")
        match = re.search(r"-?\d+", lines[0]) if lines else None
        if match is None:
            return 0, "评分失败，无法正确解析结果"
        score = int(match.group())
        feedback = "\n".join(lines[1:]).strip()
        return max(0, min(total_points, score)), feedback

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Score `user_answer` for `question` out of `total_points` via the LLM."""
        # Retrieve context relevant to the question for grounding the grader.
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join(doc.page_content for doc in relevant_docs[:2])

        # Core keywords guide the grading rubric.
        keywords = ", ".join(self.kb_processor.keywords["core"][:10])

        # Ask the LLM to grade.
        chain = LLMChain(llm=self.llm, prompt=self.scoring_prompt)
        result = chain.run(
            question=question,
            user_answer=user_answer,
            context=context,
            keywords=keywords,
            total_points=total_points
        )

        return self._parse_result(result, total_points)


# Main program
def main():
    """Run the full exam pipeline: build the knowledge base, generate
    questions one at a time, collect and score answers, then report results."""
    # Prepare the knowledge base.
    processor = KnowledgeBaseProcessor()
    processor.load_documents()
    processor.create_vector_store()
    processor.extract_keywords()

    # Wire up the exam modules.
    generator = QuestionGenerator(processor)
    exam = ExamInteractionModule(generator)
    scorer = ScoringFeedbackModule(processor)

    exam.start_exam()

    # Question loop.
    for _ in range(TOTAL_QUESTIONS):
        question = exam.get_next_question()
        if not question:
            break

        answer = exam.collect_answer(question)
        record = exam.answers[-1]

        # Zero the question on detected non-serious answering.
        if exam.check_abnormal_answer(answer):
            record["score"] = 0
            record["feedback"] = "检测到非正常答题行为，本题按0分处理。"
            print("得分：0分")
            print("反馈：检测到非正常答题行为，本题按0分处理。")
            continue

        # Score the answer.
        per_question = TOTAL_SCORE // TOTAL_QUESTIONS
        points, remarks = scorer.score_answer(question, answer, per_question)

        # Record the result.
        exam.score += points
        record["score"] = points
        record["feedback"] = remarks

        print(f"得分：{points}/{per_question}分")
        print(f"反馈：{remarks}")

    exam.finish_exam()


if __name__ == "__main__":
    main()
