import os
import re
import time
from typing import List, Dict, Tuple, Optional

import jieba
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import (
    DirectoryLoader,
    TextLoader,
    Docx2txtLoader
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import FAISS

# Configuration parameters
VECTOR_STORE_PATH = "./vector_store"  # on-disk cache for the FAISS vector store
CACHE_DIR = "./cache"  # general-purpose cache directory
# Default knowledge-base directory: a "knowledge" folder on the user's
# desktop, created automatically on first run.
DEFAULT_KNOWLEDGE_DIR = os.path.join(os.path.expanduser("~"), "Desktop", "knowledge")
SIMILARITY_THRESHOLD = 0.7  # questions above this cosine similarity count as duplicates
MAX_GENERATION_ATTEMPTS = 5  # max LLM retries before falling back to the backup bank
TOTAL_QUESTIONS = 5  # number of questions per exam
TOTAL_SCORE = 50  # total exam score
# NOTE(review): KEYWORD_WEIGHT is not referenced anywhere in this file; the
# 60%/40% split only appears as text inside the scoring prompt — confirm
# whether this dict is still needed.
KEYWORD_WEIGHT = {
    "core": 0.6,  # core keyword weight
    "extended": 0.3  # extended keyword weight
}

# Create required directories up front (idempotent)
os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(VECTOR_STORE_PATH, exist_ok=True)

# Ensure the knowledge-base directory exists and tell the user where to put files
if not os.path.exists(DEFAULT_KNOWLEDGE_DIR):
    os.makedirs(DEFAULT_KNOWLEDGE_DIR)
    print(f"系统提示：已自动创建知识库目录\n路径：{DEFAULT_KNOWLEDGE_DIR}")
    print(f"请将您的知识库文件（.txt或.docx格式）放入该目录后重新运行程序\n")


# 1. 知识库处理模块
# 1. Knowledge-base processing module
class KnowledgeBaseProcessor:
    """Loads knowledge-base documents, builds/caches a FAISS vector store,
    and extracts TF-IDF keywords for question generation and scoring."""

    def __init__(self, knowledge_dir: str = DEFAULT_KNOWLEDGE_DIR):
        self.knowledge_dir = knowledge_dir
        self.vector_store = None  # FAISS index, built lazily
        self.documents = None  # loaded documents, populated by load_documents()
        self.keywords = None  # {"core": [...], "extended": [...]} after extract_keywords()
        self.tfidf_vectorizer = TfidfVectorizer(tokenizer=self._jieba_tokenize)

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Tokenize Chinese text with jieba."""
        return list(jieba.cut(text))

    def load_documents(self) -> None:
        """Load knowledge-base documents (.txt and .docx) into self.documents.

        Raises:
            FileNotFoundError: the knowledge directory does not exist.
            NotADirectoryError: the path is a file, not a directory.
            ValueError: no .txt/.docx file is present in the directory.
        """
        # Validate the directory before touching any loader
        if not os.path.exists(self.knowledge_dir):
            raise FileNotFoundError(f"知识库目录不存在：{self.knowledge_dir}\n请检查路径是否正确")

        if not os.path.isdir(self.knowledge_dir):
            raise NotADirectoryError(f"指定路径不是目录：{self.knowledge_dir}\n请提供文件夹路径而非文件路径")

        # Fail fast when there is nothing usable to load
        all_files = os.listdir(self.knowledge_dir)
        valid_files = [f for f in all_files if f.lower().endswith(('.txt', '.docx'))]

        if not valid_files:
            raise ValueError(
                f"知识库目录中未找到有效文件（.txt或.docx）\n"
                f"目录：{self.knowledge_dir}\n"
                f"请放入至少一个知识库文件后重试"
            )

        txt_loader = DirectoryLoader(
            self.knowledge_dir,
            glob="*.txt",
            loader_cls=TextLoader,
            loader_kwargs={"encoding": "utf-8"}
        )
        docx_loader = DirectoryLoader(
            self.knowledge_dir,
            glob="*.docx",
            loader_cls=Docx2txtLoader
        )

        # BUG FIX: the original called txt_loader.load() and docx_loader.load()
        # twice each (once for the merge, once inside the count f-string),
        # re-reading every file from disk. Load each set once and reuse.
        txt_docs = txt_loader.load()
        docx_docs = docx_loader.load()
        self.documents = txt_docs + docx_docs
        print(f"成功加载文档：{len(self.documents)}个（txt: {len(txt_docs)}, docx: {len(docx_docs)}）")

    def split_documents(self) -> List:
        """Split loaded documents into overlapping chunks for embedding."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Create the FAISS vector store, or load it from the on-disk cache."""
        # One embedder serves both branches (the original constructed it twice)
        embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        if os.path.exists(VECTOR_STORE_PATH) and os.listdir(VECTOR_STORE_PATH):
            # Reuse the cached index to avoid re-embedding the corpus
            self.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH,
                embeddings,
                allow_dangerous_deserialization=True
            )
            print("已加载缓存的向量存储")
        else:
            # Build a fresh index and cache it for subsequent runs
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, embeddings)
            self.vector_store.save_local(VECTOR_STORE_PATH)
            print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Extract corpus keywords with TF-IDF and split them into
        "core" (top 20) and "extended" (next 80) tiers."""
        if self.documents is None:
            self.load_documents()

        # Tokenize each document and build the TF-IDF matrix
        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)

        # Aggregate each term's weight across all documents. A single sparse
        # column-sum replaces the original O(docs * vocab) dense per-row loop.
        feature_names = self.tfidf_vectorizer.get_feature_names_out()
        total_weights = np.asarray(tfidf_matrix.sum(axis=0)).ravel()
        keywords_with_weights = {
            feature_names[idx]: weight
            for idx, weight in enumerate(total_weights)
            if weight > 0
        }

        # Rank by aggregate weight, then split into tiers
        sorted_keywords = sorted(keywords_with_weights.items(), key=lambda x: x[1], reverse=True)
        self.keywords = {
            "core": [kw for kw, _ in sorted_keywords[:20]],  # core keywords
            "extended": [kw for kw, _ in sorted_keywords[20:100]]  # extended keywords
        }
        print(f"提取关键词：核心{len(self.keywords['core'])}个，扩展{len(self.keywords['extended'])}个")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k documents most similar to the query."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)


# 2. 题目生成模块
# 2. Question generation module
class QuestionGenerator:
    """Generates exam questions from the knowledge base via an LLM, with
    forbidden-word filtering, duplicate detection, and a backup bank."""

    # Words that must not appear in generated questions (they reference
    # figures/source listings the examinee cannot see). Defined once so the
    # checker and the prompt always agree — the original kept two separate
    # copies of this list (in _check_forbidden_words and generate_question).
    FORBIDDEN_WORDS = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        self.kb_processor = knowledge_processor
        self.llm = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com/v1",
            model_name="deepseek-v3"
        )
        self.generated_questions = []  # history, used for similarity de-dup
        self.backup_questions = []  # fallback bank when generation keeps failing
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Initialize the question and backup-bank prompt templates."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords", "forbidden_words"],
            template="""基于以下上下文和关键词，生成一个问题：
关键词：{keywords}
上下文：{context}
要求：
1. 问题必须包含至少一个关键词
2. 禁止出现这些词：{forbidden_words}
3. 题型可以是选择、判断或简答，考察对核心概念的理解
4. 问题清晰无歧义，与上下文紧密相关

生成的问题："""
        )

        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题：
关键词：{keywords}
上下文：{context}
要求：每个问题包含至少一个关键词，不要编号，每行一个问题，禁止出现"如图"等图片相关词汇

问题："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True if the text mentions any forbidden word."""
        return any(word in text for word in self.FORBIDDEN_WORDS)

    def _calculate_similarity(self, question1: str, question2: str) -> float:
        """Cosine similarity between two questions via TF-IDF vectors."""
        vectorizer = TfidfVectorizer(tokenizer=self.kb_processor._jieba_tokenize)
        tfidf_matrix = vectorizer.fit_transform([question1, question2])
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def _generate_backup_questions(self):
        """Populate the backup bank once with LLM-generated basic questions."""
        if not self.backup_questions:
            context = "\n\n".join([doc.page_content[:1000] for doc in self.kb_processor.documents[:3]])
            keywords_str = ", ".join(self.kb_processor.keywords["core"][:10])

            chain = LLMChain(llm=self.llm, prompt=self.backup_prompt)
            result = chain.run(context=context, keywords=keywords_str)
            self.backup_questions = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成保底题库：{len(self.backup_questions)}题")

    def generate_question(self) -> str:
        """Generate one valid question, retrying up to MAX_GENERATION_ATTEMPTS
        times before falling back to the backup bank."""
        attempts = 0

        # Make sure the fallback bank exists before we start retrying
        self._generate_backup_questions()

        # Loop-invariant hoist: the original rebuilt the LLMChain every attempt
        chain = LLMChain(llm=self.llm, prompt=self.question_prompt)

        while attempts < MAX_GENERATION_ATTEMPTS:
            try:
                # Sample 1-2 keywords to seed the question
                keywords = np.random.choice(
                    self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"],
                    size=np.random.randint(1, 3)
                )
                keyword_str = ", ".join(keywords)

                # Retrieve context relevant to the sampled keywords
                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

                question = chain.run(
                    context=context,
                    keywords=keyword_str,
                    forbidden_words=", ".join(self.FORBIDDEN_WORDS)
                ).strip()

                # Reject questions that mention figures/source listings
                if self._check_forbidden_words(question):
                    attempts += 1
                    continue

                # Reject near-duplicates of previously asked questions
                is_duplicate = any(
                    self._calculate_similarity(question, q) > SIMILARITY_THRESHOLD
                    for q in self.generated_questions
                )
                if not is_duplicate:
                    self.generated_questions.append(question)
                    return question

            except Exception as e:
                print(f"题目生成失败（尝试{attempts + 1}/{MAX_GENERATION_ATTEMPTS}）：{str(e)}")

            attempts += 1
            time.sleep(1)

        # After repeated failures, draw from the backup bank
        print("使用保底题目")
        for q in self.backup_questions:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q

        # Last resort: a random (possibly repeated) backup question.
        # str() normalizes numpy.str_ back to a plain Python string.
        return str(np.random.choice(self.backup_questions))


# 3. 考试交互模块
# 3. Exam interaction module
class ExamInteractionModule:
    """Handles the interactive side of the exam: prompting, answer
    collection, abnormal-answer detection, and the final report."""

    def __init__(self, question_generator: QuestionGenerator):
        self.question_generator = question_generator
        self.memory = ConversationBufferMemory(memory_key="chat_history")
        self.score = 0  # running total
        self.current_question = 0  # 1-based index of the question being asked
        self.answers = []  # per-question records: question/answer/score/feedback

    def start_exam(self) -> None:
        """Print the exam header and instructions."""
        print(f"===== 考试开始 =====")
        print(f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分")
        print(f"每题答完后将自动评分，输入空行结束答题\n")

    def get_next_question(self) -> Optional[str]:
        """Return the next generated question, or None once the quota is met."""
        if self.current_question >= TOTAL_QUESTIONS:
            return None
        self.current_question += 1
        return self.question_generator.generate_question()

    def collect_answer(self, question: str) -> str:
        """Read a (possibly multi-line) answer from stdin; a blank line ends input."""
        print(f"\n第{self.current_question}题：{question}")
        print("请输入答案（输入空行结束）：")

        # iter() with an empty-string sentinel stops at the first blank line
        collected = list(iter(input, ""))
        user_answer = "\n".join(collected)

        self.answers.append({
            "question": question,
            "answer": user_answer,
            "score": 0,
            "feedback": ""
        })
        return user_answer

    def check_abnormal_answer(self, answer: str) -> bool:
        """Heuristically flag non-attempts: giving-up phrases, copied
        question text, or answers too short to be meaningful."""
        giveup_phrases = [
            "不会", "没学", "不知道", "不懂", "略",
            "乱答", "随便写", "同上", "无", "null"
        ]
        answer_lower = answer.lower()
        question_lower = self.answers[-1]["question"].lower()

        # A giving-up phrase anywhere in the answer
        if any(phrase in answer_lower for phrase in giveup_phrases):
            return True

        # Answer is a copy of (or contained in) the question text
        if answer_lower in question_lower or question_lower in answer_lower:
            return True

        # Bare keyword / too short to be a real answer
        return len(answer_lower) < 5 and len(answer_lower.split()) <= 2

    def finish_exam(self) -> None:
        """Print the final score and a per-question breakdown."""
        print(f"\n===== 考试结束 =====")
        print(f"您的总得分：{self.score}/{TOTAL_SCORE}")
        print("\n===== 答题详情 =====")
        for idx, record in enumerate(self.answers, 1):
            print(f"\n第{idx}题：{record['question']}")
            print(f"您的答案：{record['answer']}")
            print(f"得分：{record['score']}")
            print(f"反馈：{record['feedback']}")


# 4. 评分反馈模块
# 4. Scoring and feedback module
class ScoringFeedbackModule:
    """Scores user answers against retrieved context via an LLM and
    produces per-answer feedback."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        self.kb_processor = knowledge_processor
        self.llm = OpenAI(
            api_key=os.getenv("DEEPSEEK_API_KEY"),
            base_url="https://api.deepseek.com/v1",
            model_name="deepseek-v3"
        )
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Initialize the scoring prompt template."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points"],
            template="""请基于以下信息评分：
问题：{question}
用户答案：{user_answer}
参考上下文：{context}
核心关键词：{keywords}
满分：{total_points}分

评分规则：
1. 核心关键词出现情况权重60%，扩展内容权重40%
2. 答案正确且完整得满分，部分正确按比例给分，错误得0分
3. 先输出分数（仅数字），再另起一行输出详细反馈（包括正确要点和用户答案优缺点）

评分结果："""
        )

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Score one answer and return (score, feedback).

        The score is clamped to [0, total_points]; on unparsable model
        output a (0, error-message) pair is returned instead of raising.
        """
        # Retrieve context relevant to the question
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

        # Top core keywords anchor the rubric
        keywords = ", ".join(self.kb_processor.keywords["core"][:10])

        # Ask the LLM for a score plus feedback
        chain = LLMChain(llm=self.llm, prompt=self.scoring_prompt)
        result = chain.run(
            question=question,
            user_answer=user_answer,
            context=context,
            keywords=keywords,
            total_points=total_points
        )

        # Parse "score on first line, feedback below". Robustness fixes over
        # the original: leading blank lines are stripped, the first integer is
        # extracted (models often emit "45分" or "分数：45", which int() rejects),
        # and the bare `except:` is narrowed so it no longer swallows
        # KeyboardInterrupt/SystemExit.
        try:
            score_line, *feedback_lines = result.strip().split("\n")
            match = re.search(r"\d+", score_line)
            if match is None:
                raise ValueError(f"no score in line: {score_line!r}")
            score = int(match.group())
            feedback = "\n".join(feedback_lines).strip()
            return max(0, min(total_points, score)), feedback
        except (ValueError, IndexError):
            return 0, "评分解析失败，请检查模型输出格式"


# 主程序
# Main program
def main():
    """Run the full exam flow: build the knowledge base, generate questions,
    collect and score answers, then print the final report."""
    try:
        # Build the knowledge base (documents -> vector store -> keywords)
        kb_processor = KnowledgeBaseProcessor()
        kb_processor.load_documents()
        kb_processor.create_vector_store()
        kb_processor.extract_keywords()

        question_generator = QuestionGenerator(kb_processor)
        exam_interaction = ExamInteractionModule(question_generator)
        scoring_module = ScoringFeedbackModule(kb_processor)

        exam_interaction.start_exam()

        # Loop-invariant hoist: per-question points never change mid-exam
        # (the original recomputed this on every iteration)
        points_per_question = TOTAL_SCORE // TOTAL_QUESTIONS

        # Ask, collect, and score each question in turn
        for _ in range(TOTAL_QUESTIONS):
            question = exam_interaction.get_next_question()
            if not question:
                break

            user_answer = exam_interaction.collect_answer(question)

            # Zero-score answers that look like non-attempts
            if exam_interaction.check_abnormal_answer(user_answer):
                exam_interaction.answers[-1]["score"] = 0
                exam_interaction.answers[-1]["feedback"] = "检测到非正常答题行为，本题按0分处理"
                print("得分：0分")
                print("反馈：检测到非正常答题行为，本题按0分处理")
                continue

            score, feedback = scoring_module.score_answer(
                question,
                user_answer,
                points_per_question
            )

            # Record the result
            exam_interaction.score += score
            exam_interaction.answers[-1]["score"] = score
            exam_interaction.answers[-1]["feedback"] = feedback

            print(f"得分：{score}/{points_per_question}分")
            print(f"反馈：{feedback}")

        exam_interaction.finish_exam()

    except Exception as e:
        # Top-level boundary: show a readable error instead of a traceback
        print(f"\n程序出错：{str(e)}")
        print("请根据提示解决问题后重新运行程序")


# Script entry point: run the exam flow only when executed directly
if __name__ == "__main__":
    main()
