import os
import re

import torch

# Force Hugging Face libraries into offline mode (local models only, no
# dataset downloads). Must run before `transformers` is imported below.
os.environ['TRANSFORMERS_OFFLINE'] = '1'
os.environ['HF_DATASETS_OFFLINE'] = '1'

import jieba
from langchain.memory import ConversationBufferMemory
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.document_loaders import Docx2txtLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpenAI
from typing import List, Dict, Tuple, Optional
import time
import networkx as nx
from transformers import AutoTokenizer, AutoModel
from sentence_transformers import SentenceTransformer, util

# --- Configuration ---
KNOWLEDGE_FILE = "D:\\桌面\\第3章 数据获取.docx"  # source .docx knowledge base
VECTOR_STORE_PATH = "./vector_store"  # on-disk FAISS index cache
CACHE_DIR = "./cache"
SIMILARITY_THRESHOLD = 0.7  # questions above this similarity count as duplicates
MAX_GENERATION_ATTEMPTS = 5  # LLM generation retries before falling back
TOTAL_QUESTIONS = 5
TOTAL_SCORE = 50

# Ensure cache directories exist before any module uses them.
os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(VECTOR_STORE_PATH, exist_ok=True)

# 1. Knowledge base processing module
class KnowledgeBaseProcessor:
    # Local sentence-embedding model shared by the tokenizer/model pair and
    # the FAISS embeddings (hoisted from three duplicated literals).
    LOCAL_MODEL_PATH = "D:\\local_models\\paraphrase-multilingual-MiniLM-L12-v2"

    def __init__(self, knowledge_file: str = KNOWLEDGE_FILE):
        """Load the local embedding model and prepare lazily-built state.

        Args:
            knowledge_file: path to the .docx knowledge base file.
        """
        self.knowledge_file = knowledge_file
        self.vector_store = None  # FAISS store, built on first use
        self.documents = None     # raw loaded document objects
        self.keywords = None      # {"core": [...], "extended": [...]}
        # BUGFIX: extract_keywords() referenced self.tfidf_vectorizer, which
        # was never initialized anywhere — every call raised AttributeError.
        self.tfidf_vectorizer = TfidfVectorizer()
        self.tokenizer = AutoTokenizer.from_pretrained(self.LOCAL_MODEL_PATH)
        self.model = AutoModel.from_pretrained(self.LOCAL_MODEL_PATH)

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Segment Chinese text into tokens with jieba."""
        return list(jieba.cut(text))

    def _make_embeddings(self) -> HuggingFaceEmbeddings:
        """Build the local-model embedding wrapper used by FAISS.

        BUGFIX: the keyword argument is `model_name`, not
        `model_name_or_path` — the original raised a TypeError/validation
        error on construction.
        """
        return HuggingFaceEmbeddings(
            model_name=self.LOCAL_MODEL_PATH,
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': True},
        )

    def load_documents(self) -> None:
        """Load the .docx knowledge file into self.documents.

        Raises:
            FileNotFoundError: when the file does not exist.
            ValueError: when the file is not a .docx.
        """
        if not os.path.exists(self.knowledge_file):
            raise FileNotFoundError(f"文件不存在：{self.knowledge_file}")

        if not self.knowledge_file.lower().endswith(".docx"):
            raise ValueError(f"文件格式错误：{self.knowledge_file}")

        loader = Docx2txtLoader(self.knowledge_file)
        self.documents = loader.load()
        print(f"成功加载docx文件：{self.knowledge_file}（共{len(self.documents)}个文档对象）")

    def split_documents(self) -> List:
        """Split loaded documents into overlapping ~500-character chunks."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Create the FAISS vector store, or load it from the local cache."""
        if os.path.exists(VECTOR_STORE_PATH) and os.listdir(VECTOR_STORE_PATH):
            # BUGFIX: load with the SAME local-model embeddings used when the
            # store was built; the original used a default-constructed
            # HuggingFaceEmbeddings() pointing at a different model, which
            # silently mismatches the cached index.
            self.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH,
                self._make_embeddings(),
                allow_dangerous_deserialization=True,
            )
            print("已加载缓存的向量存储")
        else:
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, self._make_embeddings())
            self.vector_store.save_local(VECTOR_STORE_PATH)  # persist for next run
            print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Rank TF-IDF terms; top 20 become 'core', next 80 'extended'."""
        if self.documents is None:
            self.load_documents()

        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)
        feature_names = self.tfidf_vectorizer.get_feature_names_out()

        # Summing each column accumulates a term's weight across all documents
        # — same result as the original per-document Python loop, at C speed.
        total_weights = np.asarray(tfidf_matrix.sum(axis=0)).ravel()
        order = np.argsort(total_weights)[::-1]
        ranked = [feature_names[i] for i in order if total_weights[i] > 0]

        self.keywords = {
            "core": ranked[:20],
            "extended": ranked[20:100],
        }
        print(f"提取关键词：核心{len(self.keywords['core'])}个，扩展{len(self.keywords['extended'])}个")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to `query` (builds store lazily)."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)

# 2. Question generation module
class QuestionGenerator:
    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        """Set up the LLM client and prompt templates.

        Args:
            knowledge_processor: knowledge base used for context retrieval.
        """
        self.kb_processor = knowledge_processor
        # BUGFIX: `.chat.completions.create` (used in get_ai_response) is the
        # OpenAI SDK client API; langchain_community.llms.OpenAI imported at
        # module level does not provide it, so import the SDK client here.
        from openai import OpenAI as _OpenAIClient
        # SECURITY: prefer the DASHSCOPE_API_KEY environment variable; the
        # literal fallback preserves old behavior, but this key is committed
        # to source and should be rotated and removed.
        self.client = _OpenAIClient(
            api_key=os.getenv("DASHSCOPE_API_KEY",
                              "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self.generated_questions = []  # questions already asked this exam
        self.backup_questions = []     # fallback pool, filled lazily
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the generation and fallback prompt templates."""
        # BUGFIX: the original question template was a copy of the *scoring*
        # prompt and declared variables (question, user_answer, total_points,
        # similarity_score, ...) that generate_question() never supplies, so
        # .format() raised KeyError on every attempt. It now declares exactly
        # the two variables that are passed in.
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""作为一个专业的AI考试出题员，请根据以下信息生成一道简答题：
            关键词：{keywords}
            上下文：{context}
            要求：问题必须基于上下文，包含至少一个关键词，禁止出现"如图"“所示”等图片相关词汇，只输出问题本身。
            问题："""
        )

        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题：
            关键词：{keywords}
            上下文：{context}
            要求：每个问题包含至少一个关键词，不要编号，每行一个问题，禁止出现"如图"“所示”等图片相关词汇
            问题："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True if `text` mentions figures/screenshots/source code."""
        forbidden = ["如图", "如图所示", "图例", "源程序", "所示", "源代码", "截图", "示例程序"]
        return any(word in text for word in forbidden)

    def calculate_similarity(self, text1: str, text2: str) -> float:
        """Semantic cosine similarity of two texts (mean-pooled embeddings)."""
        # PERF: reuse the tokenizer/model the knowledge processor already
        # loaded instead of re-loading both from disk on every call.
        tokenizer = self.kb_processor.tokenizer
        model = self.kb_processor.model

        inputs = tokenizer([text1, text2], padding=True, truncation=True, return_tensors='pt')
        with torch.no_grad():
            embeddings = model(**inputs).last_hidden_state.mean(dim=1)
        # Convert to NumPy explicitly — sklearn expects arrays, not tensors.
        similarity = cosine_similarity(
            embeddings[0].unsqueeze(0).numpy(),
            embeddings[1].unsqueeze(0).numpy(),
        )
        return similarity.item()

    def _generate_backup_questions(self):
        """Fill the fallback question pool once, from the leading documents."""
        if not self.backup_questions:
            context = "\n\n".join([doc.page_content[:1000] for doc in self.kb_processor.documents[:3]])
            keywords_str = ", ".join(self.kb_processor.keywords["core"][:10])

            result = self.get_ai_response(
                prompt=self.backup_prompt.format(context=context, keywords=keywords_str)
            )
            self.backup_questions = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成保底题库：{len(self.backup_questions)}题")

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Send `prompt` to the chat model, retrying on failure.

        Raises:
            Exception: when all `max_retries` attempts fail.
        """
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    timeout=30
                )
                return response.choices[0].message.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({retry_count}/{max_retries})...")
                time.sleep(2)
        return ""

    def generate_question(self) -> str:
        """Generate one new, non-duplicate question (fallback pool on failure)."""
        attempts = 0

        self._generate_backup_questions()

        while attempts < MAX_GENERATION_ATTEMPTS:
            try:
                keywords = np.random.choice(
                    self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"],
                    size=np.random.randint(1, 3)
                )
                keyword_str = ", ".join(keywords)

                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

                prompt_content = self.question_prompt.format(
                    context=context,
                    keywords=keyword_str
                )
                question = self.get_ai_response(prompt_content).strip()

                if self._check_forbidden_words(question):
                    attempts += 1
                    continue

                # BUGFIX: the original called self._calculate_similarity,
                # which does not exist (the method is calculate_similarity)
                # — AttributeError on every de-duplication check.
                if not any(self.calculate_similarity(question, q) > SIMILARITY_THRESHOLD
                           for q in self.generated_questions):
                    self.generated_questions.append(question)
                    return question

            except Exception as e:
                print(f"题目生成失败（尝试{attempts + 1}/{MAX_GENERATION_ATTEMPTS}）：{str(e)}")

            attempts += 1
            time.sleep(1)

        print("使用保底题目")
        for q in self.backup_questions:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q

        # Pool exhausted / all filtered: repeat an arbitrary backup question.
        return np.random.choice(self.backup_questions)

# 3. Exam interaction module
class ExamInteractionModule:
    def __init__(self, question_generator: QuestionGenerator):
        """Track exam state: running score, question counter, answer log."""
        self.question_generator = question_generator
        self.memory = ConversationBufferMemory(memory_key="chat_history")
        self.score = 0
        self.current_question = 0
        self.answers = []

    def start_exam(self) -> None:
        """Print the exam banner and the answering rules."""
        print(f"===== 考试开始 =====")
        print(f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分")
        print(f"每题答完后将自动评分，输入空行结束答题\n")

    def get_next_question(self) -> Optional[str]:
        """Advance the counter and return a fresh question, or None when done."""
        if self.current_question >= TOTAL_QUESTIONS:
            return None
        self.current_question += 1
        return self.question_generator.generate_question()

    def collect_answer(self, question: str) -> str:
        """Read a multi-line answer from stdin, terminated by a blank line."""
        print(f"\n第{self.current_question}题：{question}")
        print("请输入答案（输入空行结束）：")

        collected = []
        while (line := input()):
            collected.append(line)

        user_answer = "\n".join(collected)
        # Log the answer; score/feedback are filled in after grading.
        self.answers.append({
            "question": question,
            "answer": user_answer,
            "score": 0,
            "feedback": "",
        })
        return user_answer

    def check_abnormal_answer(self, answer: str) -> bool:
        """Heuristically flag give-ups, question copies, and trivial answers."""
        giveup_markers = (
            "不会", "没学", "不知道", "不懂", "略",
            "乱答", "随便写", "同上", "无", "null",
        )
        answer_lower = answer.lower()
        question = self.answers[-1]["question"].lower()

        # Explicit refusal phrases.
        if any(marker in answer_lower for marker in giveup_markers):
            return True

        # Answer is just (part of) the question, or vice versa.
        if answer_lower in question or question in answer_lower:
            return True

        # Too short to be a real answer.
        return len(answer_lower) < 5 and len(answer_lower.split()) <= 2

    def finish_exam(self) -> None:
        """Print the final score and a per-question breakdown."""
        print(f"\n===== 考试结束 =====")
        print(f"您的总得分：{self.score}/{TOTAL_SCORE}")
        print("\n===== 答题详情 =====")
        for number, record in enumerate(self.answers, 1):
            print(f"\n第{number}题：{record['question']}")
            print(f"您的答案：{record['answer']}")
            print(f"得分：{record['score']}")
            print(f"反馈：{record['feedback']}")

# 4. Scoring and feedback module
class ScoringFeedbackModule:
    def __init__(self, knowledge_processor: KnowledgeBaseProcessor):
        """Prepare the scoring LLM client, prompt template, similarity model.

        Args:
            knowledge_processor: knowledge base used for context retrieval.
        """
        self.kb_processor = knowledge_processor
        # BUGFIX: `.chat.completions.create` (used in get_ai_response) is the
        # OpenAI SDK client API; langchain_community.llms.OpenAI imported at
        # module level does not provide it, so import the SDK client here.
        from openai import OpenAI as _OpenAIClient
        # SECURITY: prefer the DASHSCOPE_API_KEY environment variable; the
        # literal fallback preserves old behavior, but this key is committed
        # to source and should be rotated and removed.
        self.client = _OpenAIClient(
            api_key=os.getenv("DASHSCOPE_API_KEY",
                              "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self._init_prompt_templates()
        self.similarity_model = SentenceTransformer("D:\\local_models\\paraphrase-multilingual-MiniLM-L12-v2")

    def _init_prompt_templates(self):
        """Build the scoring prompt template."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points"],
            template="""作为一个专业的AI考试评分员，请严格按照以下要求对用户答案进行评分和反馈。
            ...
            """
        )

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Send `prompt` to the chat model, retrying on failure.

        BUGFIX: score_answer() called this method, but it was only defined on
        QuestionGenerator — every scoring call raised AttributeError.

        Raises:
            Exception: when all `max_retries` attempts fail.
        """
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    timeout=30
                )
                return response.choices[0].message.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                print(f"API调用失败，正在重试({retry_count}/{max_retries})...")
                time.sleep(2)
        return ""

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Grade `user_answer` against retrieved context.

        Returns:
            (score clamped to [0, total_points], feedback text).
        """
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])

        # NOTE(review): the similarity score is computed but never fed into
        # the prompt (the template does not declare it) — kept for parity
        # with the original; consider wiring it into the template.
        similarity_score = self._calculate_semantic_similarity(user_answer, context)

        keywords = ", ".join(self.kb_processor.keywords["core"][:10])

        prompt_content = self.scoring_prompt.format(
            question=question,
            user_answer=user_answer,
            context=context,
            keywords=keywords,
            total_points=total_points
        )

        result = self.get_ai_response(prompt_content)

        try:
            score_line, *feedback_lines = result.split("\n")
            # Robustness: models often prefix the number ("得分：8"), which
            # int() on the whole line rejects — extract the first integer.
            match = re.search(r"-?\d+", score_line)
            if match is None:
                raise ValueError(f"分数行无数字：{score_line!r}")
            score = int(match.group())
            feedback = "\n".join(feedback_lines).strip()
            return max(0, min(total_points, score)), feedback
        except Exception as e:
            print(f"评分解析失败：{str(e)}")
            return 0, "评分解析失败，请检查模型输出格式"

    def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
        """Cosine similarity of sentence embeddings; 0.0 on any failure."""
        try:
            embeddings = self.similarity_model.encode([text1, text2], convert_to_tensor=True)
            cosine_score = util.cos_sim(embeddings[0], embeddings[1])
            return cosine_score.item()
        except Exception as e:
            print(f"计算语义相似度失败：{str(e)}")
            return 0.0

# 5. Knowledge graph validation module
class KnowledgeGraphValidator:
    def __init__(self, documents):
        """Build a directed knowledge graph from the given documents."""
        self.graph = nx.DiGraph()
        self._build_graph(documents)

    def _build_graph(self, documents):
        """Populate self.graph from the documents.

        TODO: entity/relation extraction is not implemented yet; the graph
        stays empty.
        """
        pass

    def validate_logic(self, user_answer: str) -> dict:
        """Check whether the answer's entities are connected in the graph.

        Returns:
            dict with 'logical_consistency' (bool — True when a directed path
            links the first and last extracted entity) and 'entity_coverage'
            (currently always 0; TODO compute real coverage).
        """
        entities = self._extract_entities(user_answer)

        coverage = 0
        consistent = False

        if len(entities) > 1:
            first, last = entities[0], entities[-1]
            both_in_graph = self.graph.has_node(first) and self.graph.has_node(last)
            if both_in_graph and nx.has_path(self.graph, first, last):
                consistent = True

        return {"logical_consistency": consistent, "entity_coverage": coverage}

    def _extract_entities(self, text):
        """Extract known entities from text (stub — always returns none)."""
        return []

# Main program
def main():
    """Run the full exam flow: build the KB, then ask, score, and report."""
    try:
        # Build the knowledge base artifacts up front.
        kb_processor = KnowledgeBaseProcessor()
        kb_processor.load_documents()
        kb_processor.create_vector_store()
        kb_processor.extract_keywords()

        question_generator = QuestionGenerator(kb_processor)
        exam = ExamInteractionModule(question_generator)
        scorer = ScoringFeedbackModule(kb_processor)

        exam.start_exam()

        for _ in range(TOTAL_QUESTIONS):
            question = exam.get_next_question()
            if not question:
                break

            user_answer = exam.collect_answer(question)

            # Zero-score obvious non-answers without calling the scorer.
            if exam.check_abnormal_answer(user_answer):
                latest = exam.answers[-1]
                latest["score"] = 0
                latest["feedback"] = "检测到非正常答题行为，本题按0分处理"
                print("得分：0分")
                print("反馈：检测到非正常答题行为，本题按0分处理")
                continue

            per_question_points = TOTAL_SCORE // TOTAL_QUESTIONS
            score, feedback = scorer.score_answer(
                question,
                user_answer,
                per_question_points,
            )

            exam.score += score
            exam.answers[-1]["score"] = score
            exam.answers[-1]["feedback"] = feedback

            print(f"得分：{score}/{per_question_points}分")
            print(f"反馈：{feedback}")

        exam.finish_exam()

    except Exception as e:
        print(f"\n程序出错：{str(e)}")
        print("请根据提示解决问题后重新运行程序")

if __name__ == "__main__":
    main()
