import os
os.environ['TRANSFORMERS_OFFLINE'] = '1'  # force transformers to load models from local disk only
os.environ['HF_DATASETS_OFFLINE'] = '1'   # disable HuggingFace dataset downloads (must be set before HF imports)
import re
import time
from typing import List, Dict, Tuple, Optional

import jieba
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.document_loaders import Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import OpenAI



# Configuration — path to the single .docx knowledge file
KNOWLEDGE_FILE = "D:\\桌面\\第3章 数据获取.docx"  # note the escaped backslashes (Windows path)
VECTOR_STORE_PATH = "./vector_store"  # FAISS index cache directory
CACHE_DIR = "./cache"
SIMILARITY_THRESHOLD = 0.7      # questions above this cosine similarity count as duplicates
MAX_GENERATION_ATTEMPTS = 5     # LLM retries before falling back to the backup question pool
TOTAL_QUESTIONS = 5
TOTAL_SCORE = 50
# Relative weights for core vs. extended keywords (referenced in the scoring prompt)
KEYWORD_WEIGHT = {
    "core": 0.6,
    "extended": 0.3
}

# Create the cache directories up front
os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(VECTOR_STORE_PATH, exist_ok=True)


# 1. Knowledge-base processing module (adapted to a single .docx file)
class KnowledgeBaseProcessor:
    """Loads a single .docx knowledge file, builds/caches a FAISS vector store,
    and extracts TF-IDF keywords for question generation and scoring.

    All heavy resources (documents, vector store, keywords) are built lazily.
    """

    def __init__(self, knowledge_file: str = KNOWLEDGE_FILE):
        self.knowledge_file = knowledge_file  # path to the single .docx file
        self.vector_store = None   # FAISS index, built lazily
        self.documents = None      # raw LangChain Document objects
        self.keywords = None       # {"core": [...], "extended": [...]} after extract_keywords()
        # token_pattern=None suppresses sklearn's "token_pattern will not be
        # used" warning that fires whenever a custom tokenizer is supplied.
        self.tfidf_vectorizer = TfidfVectorizer(
            tokenizer=self._jieba_tokenize, token_pattern=None
        )

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Tokenize Chinese text with jieba."""
        return list(jieba.cut(text))

    def load_documents(self) -> None:
        """Load the configured .docx file.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the file is not a .docx file.
        """
        if not os.path.exists(self.knowledge_file):
            raise FileNotFoundError(f"文件不存在：{self.knowledge_file}\n请检查路径是否正确")

        if not self.knowledge_file.lower().endswith(".docx"):
            raise ValueError(f"文件格式错误：{self.knowledge_file}\n请提供docx格式文件")

        loader = Docx2txtLoader(self.knowledge_file)
        self.documents = loader.load()
        print(f"成功加载docx文件：{self.knowledge_file}（共{len(self.documents)}个文档对象）")

    def split_documents(self) -> List:
        """Split the loaded documents into overlapping chunks for embedding."""
        # Lazy-load so this method works standalone (fixes the TypeError that
        # occurred when called before load_documents(); now consistent with
        # extract_keywords()).
        if self.documents is None:
            self.load_documents()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Load the cached FAISS store if present, otherwise build and cache one."""
        embeddings = HuggingFaceEmbeddings(
            model_name="D:\\local_models\\all-MiniLM-L6-v2",
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': True}
        )

        if os.path.exists(VECTOR_STORE_PATH) and os.listdir(VECTOR_STORE_PATH):
            # NOTE(review): deserialization of a pickle-backed index is only
            # safe because this cache is created by this very program.
            self.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH,
                embeddings,
                allow_dangerous_deserialization=True
            )
            print("已加载缓存的向量存储")
        else:
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, embeddings)
            self.vector_store.save_local(VECTOR_STORE_PATH)
            print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Rank corpus terms by summed TF-IDF weight: top 20 become "core"
        keywords, the next 80 "extended"."""
        if self.documents is None:
            self.load_documents()

        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)

        feature_names = self.tfidf_vectorizer.get_feature_names_out()
        keywords_with_weights: Dict[str, float] = {}

        # Accumulate each term's weight across all documents.
        for i in range(len(texts)):
            doc_weights = tfidf_matrix[i].toarray()[0]
            for idx, weight in enumerate(doc_weights):
                if weight > 0:
                    keyword = feature_names[idx]
                    keywords_with_weights[keyword] = keywords_with_weights.get(keyword, 0) + weight

        sorted_keywords = sorted(keywords_with_weights.items(), key=lambda x: x[1], reverse=True)
        self.keywords = {
            "core": [kw for kw, _ in sorted_keywords[:20]],
            "extended": [kw for kw, _ in sorted_keywords[20:100]]
        }
        print(f"提取关键词：核心{len(self.keywords['core'])}个，扩展{len(self.keywords['extended'])}个")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to `query` (builds the store lazily)."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)


# 2. Question generation module
class QuestionGenerator:
    """Generates exam questions with an LLM, filtering forbidden words,
    de-duplicating by TF-IDF similarity, and falling back to a backup pool."""

    # Words referring to images/source listings that must not appear in
    # questions. Single source of truth (was previously duplicated in
    # _check_forbidden_words and generate_question).
    FORBIDDEN_WORDS = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        self.kb_processor = knowledge_processor
        # SECURITY: avoid committing API keys to source control. Prefer the
        # DEEPSEEK_API_KEY environment variable; the hard-coded key is kept
        # only as a backward-compatible fallback and should be rotated.
        self.llm = OpenAI(
            api_key=os.environ.get("DEEPSEEK_API_KEY", "sk-faa84a6e39ed4a78a90b49b8fb811bfc"),
            base_url="https://api.deepseek.com/v1",
            model_name="deepseek-v3"
        )
        self.generated_questions = []  # accepted questions (used for similarity checks)
        self.backup_questions = []     # lazily generated fallback pool
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the main and backup prompt templates."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords", "forbidden_words"],
            template="""基于以下上下文和关键词，生成一个问题：
关键词：{keywords}
上下文：{context}
要求：
1. 问题必须包含至少一个关键词
2. 禁止出现这些词：{forbidden_words}
3. 题型可以是选择、判断或简答，考察对核心概念的理解
4. 问题清晰无歧义，与上下文紧密相关

生成的问题："""
        )

        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题：
关键词：{keywords}
上下文：{context}
要求：每个问题包含至少一个关键词，不要编号，每行一个问题，禁止出现"如图"等图片相关词汇

问题："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True if `text` contains any forbidden (image/code) word."""
        return any(word in text for word in self.FORBIDDEN_WORDS)

    def _calculate_similarity(self, question1: str, question2: str) -> float:
        """TF-IDF cosine similarity between two questions (jieba-tokenized)."""
        vectorizer = TfidfVectorizer(tokenizer=self.kb_processor._jieba_tokenize)
        tfidf_matrix = vectorizer.fit_transform([question1, question2])
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def _generate_backup_questions(self):
        """Populate the fallback pool once, from the first few documents."""
        if not self.backup_questions:
            context = "\n\n".join([doc.page_content[:1000] for doc in self.kb_processor.documents[:3]])
            keywords_str = ", ".join(self.kb_processor.keywords["core"][:10])

            chain = LLMChain(llm=self.llm, prompt=self.backup_prompt)
            result = chain.run(context=context, keywords=keywords_str)
            self.backup_questions = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成保底题库：{len(self.backup_questions)}题")

    def generate_question(self) -> str:
        """Generate one new question; retry up to MAX_GENERATION_ATTEMPTS,
        then fall back to the backup pool.

        Raises:
            RuntimeError: if no valid question can be produced and the backup
                pool is empty.
        """
        attempts = 0
        self._generate_backup_questions()

        while attempts < MAX_GENERATION_ATTEMPTS:
            try:
                # Sample 1-2 random keywords to steer retrieval and generation.
                keywords = np.random.choice(
                    self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"],
                    size=np.random.randint(1, 3)
                )
                keyword_str = ", ".join(keywords)

                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

                chain = LLMChain(llm=self.llm, prompt=self.question_prompt)
                question = chain.run(
                    context=context,
                    keywords=keyword_str,
                    forbidden_words=", ".join(self.FORBIDDEN_WORDS)
                ).strip()

                if self._check_forbidden_words(question):
                    attempts += 1
                    continue

                # Reject questions too similar to anything already asked.
                similar = any(
                    self._calculate_similarity(question, q) > SIMILARITY_THRESHOLD
                    for q in self.generated_questions
                )
                if not similar:
                    self.generated_questions.append(question)
                    return question

            except Exception as e:
                print(f"题目生成失败（尝试{attempts + 1}/{MAX_GENERATION_ATTEMPTS}）：{str(e)}")

            attempts += 1
            time.sleep(1)  # brief back-off between LLM attempts

        print("使用保底题目")
        for q in self.backup_questions:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q

        # np.random.choice raises an opaque ValueError on an empty list;
        # fail with an explicit message instead.
        if not self.backup_questions:
            raise RuntimeError("保底题库为空，无法生成题目")
        return np.random.choice(self.backup_questions)


# 3. Exam interaction module
class ExamInteractionModule:
    """Drives the exam loop: hands out questions, collects multi-line answers
    from stdin, flags abnormal answers, and prints the final summary."""

    def __init__(self, question_generator: QuestionGenerator):
        self.question_generator = question_generator
        self.memory = ConversationBufferMemory(memory_key="chat_history")
        self.score = 0               # running total score
        self.current_question = 0    # 1-based index of the question being asked
        self.answers = []            # per-question dicts: question/answer/score/feedback

    def start_exam(self) -> None:
        """Print the exam header."""
        print("===== 考试开始 =====")
        print(f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分")
        print(f"每题答完后将自动评分，输入空行结束答题\n")

    def get_next_question(self) -> Optional[str]:
        """Return the next generated question, or None when the exam is over."""
        if self.current_question < TOTAL_QUESTIONS:
            self.current_question += 1
            return self.question_generator.generate_question()
        return None

    def collect_answer(self, question: str) -> str:
        """Prompt for a multi-line answer; an empty line (or EOF) ends input."""
        print(f"\n第{self.current_question}题：{question}")
        print("请输入答案（输入空行结束）：")

        answer_lines = []
        while True:
            try:
                line = input()
            except EOFError:
                # stdin closed (e.g. piped input exhausted) — treat as done
                # instead of crashing the exam loop.
                break
            if not line:
                break
            answer_lines.append(line)

        user_answer = "\n".join(answer_lines)
        self.answers.append({
            "question": question,
            "answer": user_answer,
            "score": 0,
            "feedback": ""
        })
        return user_answer

    def check_abnormal_answer(self, answer: str) -> bool:
        """Heuristically detect non-answers: refusal phrases, the question
        echoed back, or answers too short to be meaningful."""
        abnormal_patterns = [
            "不会", "没学", "不知道", "不懂", "略",
            "乱答", "随便写", "同上", "无", "null"
        ]
        answer_lower = answer.lower()
        question = self.answers[-1]["question"].lower()

        for pattern in abnormal_patterns:
            if pattern in answer_lower:
                return True

        # Answer is a substring of the question (or vice versa) — likely an echo.
        # Note this also flags empty answers, since "" is in every string.
        if answer_lower in question or question in answer_lower:
            return True

        # Too short to be a real answer.
        if len(answer_lower) < 5 and len(answer_lower.split()) <= 2:
            return True

        return False

    def finish_exam(self) -> None:
        """Print the total score and the per-question breakdown."""
        print(f"\n===== 考试结束 =====")
        print(f"您的总得分：{self.score}/{TOTAL_SCORE}")
        print("\n===== 答题详情 =====")
        for i, item in enumerate(self.answers, 1):
            print(f"\n第{i}题：{item['question']}")
            print(f"您的答案：{item['answer']}")
            print(f"得分：{item['score']}")
            print(f"反馈：{item['feedback']}")


# 4. Scoring & feedback module
class ScoringFeedbackModule:
    """Scores a user's answer with an LLM against retrieved context and
    parses the model output into (score, feedback)."""

    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        self.kb_processor = knowledge_processor
        # SECURITY: avoid committing API keys to source control. Prefer the
        # DASHSCOPE_API_KEY environment variable; the hard-coded key is kept
        # only as a backward-compatible fallback and should be rotated.
        self.llm = OpenAI(
            api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-faa84a6e39ed4a78a90b49b8fb811bfc"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model_name="deepseek-v3"
        )
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the scoring prompt template."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points"],
            template="""请基于以下信息评分：
问题：{question}
用户答案：{user_answer}
参考上下文：{context}
核心关键词：{keywords}
满分：{total_points}分

评分规则：
1. 核心关键词出现情况权重60%，扩展内容权重40%
2. 答案正确且完整得满分，部分正确按比例给分，错误得0分
3. 先输出分数（仅数字），再另起一行输出详细反馈（包括正确要点和用户答案优缺点）

评分结果："""
        )

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Score `user_answer` for `question`.

        Returns:
            (score, feedback) — score is clamped to [0, total_points];
            (0, error message) if the model output cannot be parsed.
        """
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

        keywords = ", ".join(self.kb_processor.keywords["core"][:10])

        chain = LLMChain(llm=self.llm, prompt=self.scoring_prompt)
        result = chain.run(
            question=question,
            user_answer=user_answer,
            context=context,
            keywords=keywords,
            total_points=total_points
        )

        try:
            # strip() first so a leading blank line doesn't become the "score line";
            # extract the first integer so output like "85分" still parses.
            score_line, *feedback_lines = result.strip().split("\n")
            match = re.search(r"\d+", score_line)
            if match is None:
                raise ValueError(f"分数行无数字：{score_line!r}")
            score = int(match.group())
            feedback = "\n".join(feedback_lines).strip()
            return max(0, min(total_points, score)), feedback
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so real bugs / KeyboardInterrupt
            # are no longer swallowed.
            return 0, "评分解析失败，请检查模型输出格式"


# Main program
def main():
    """Run the full exam pipeline: build the knowledge base, generate
    questions, collect answers, score them, and print the summary."""
    try:
        kb_processor = KnowledgeBaseProcessor()
        kb_processor.load_documents()
        kb_processor.create_vector_store()
        kb_processor.extract_keywords()

        question_generator = QuestionGenerator(kb_processor)
        exam_interaction = ExamInteractionModule(question_generator)
        scoring_module = ScoringFeedbackModule(kb_processor)

        exam_interaction.start_exam()

        # Even split of points; the last question absorbs any integer-division
        # remainder so per-question points always sum to TOTAL_SCORE.
        base_points = TOTAL_SCORE // TOTAL_QUESTIONS

        for question_index in range(TOTAL_QUESTIONS):
            question = exam_interaction.get_next_question()
            if not question:
                break

            points_per_question = base_points
            if question_index == TOTAL_QUESTIONS - 1:
                points_per_question = TOTAL_SCORE - base_points * (TOTAL_QUESTIONS - 1)

            user_answer = exam_interaction.collect_answer(question)

            if exam_interaction.check_abnormal_answer(user_answer):
                exam_interaction.answers[-1]["score"] = 0
                exam_interaction.answers[-1]["feedback"] = "检测到非正常答题行为，本题按0分处理"
                print("得分：0分")
                print("反馈：检测到非正常答题行为，本题按0分处理")
                continue

            score, feedback = scoring_module.score_answer(
                question,
                user_answer,
                points_per_question
            )

            exam_interaction.score += score
            exam_interaction.answers[-1]["score"] = score
            exam_interaction.answers[-1]["feedback"] = feedback

            print(f"得分：{score}/{points_per_question}分")
            print(f"反馈：{feedback}")

        exam_interaction.finish_exam()

    except Exception as e:  # top-level boundary: report and exit cleanly
        print(f"\n程序出错：{str(e)}")
        print("请根据提示解决问题后重新运行程序")


if __name__ == "__main__":
    main()