import os
import re
import json
import jieba
import numpy as np
import requests
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader, DirectoryLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.messages import HumanMessage, SystemMessage
from openai import OpenAI  # 引入OpenAI客户端

# Initialize the OpenAI-compatible client for the DashScope endpoint.
# SECURITY: the API key was previously hard-coded only; prefer the
# DASHSCOPE_API_KEY environment variable. The literal below is kept solely
# as a backward-compatible fallback and should be rotated/removed.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-47fd418b4a8d4dffab040d3e9a282627"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
)

reasoning_content = ""  # accumulates the model's full reasoning (chain-of-thought) text
answer_content = ""     # accumulates the model's final answer text
is_answering = False    # flips to True once the final-answer phase begins

# Module-level demo: issue one streaming chat request.
# NOTE(review): this network call runs at import time, not only when the file
# is executed as a script — consider moving it under `if __name__ == "__main__":`.
completion = client.chat.completions.create(
    model="deepseek-r1",  # reasoning model that emits `reasoning_content` deltas
    messages=[
        {"role": "user", "content": "9.9和9.11谁大"}
    ],
    stream=True,
    # Optional: request usage statistics (delivered in the final chunk)
    # stream_options={"include_usage": True}
)

# Separator printed before the streamed reasoning output
print("\n" + "=" * 20 + "思考过程" + "=" * 20 + "\n")

# Consume the streamed response chunk by chunk
for chunk in completion:
    if not chunk.choices:
        # Chunks without choices may carry usage statistics (when enabled)
        if hasattr(chunk, 'usage'):
            print("\n" + "=" * 20 + "使用统计" + "=" * 20 + "\n")
            print(f"消耗Token: {chunk.usage.total_tokens}")
        continue

    delta = chunk.choices[0].delta

    # Collect and echo the reasoning (chain-of-thought) stream
    if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
        print(delta.reasoning_content, end='', flush=True)
        reasoning_content += delta.reasoning_content

    # Collect and echo the final-answer stream
    if hasattr(delta, 'content') and delta.content is not None:
        if not is_answering:
            # First answer token: print the answer separator exactly once
            print("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
            is_answering = True
        print(delta.content, end='', flush=True)
        answer_content += delta.content



# API wrapper supporting both streaming and non-streaming calls.
def call_deepseek_api(prompt, model="deepseek-v3", temperature=0.7, max_tokens=1000, stream=False):
    """Call a DeepSeek model through the OpenAI-compatible client.

    Args:
        prompt: User prompt, sent as a single-turn message.
        model: Model identifier (e.g. "deepseek-v3", "deepseek-r1").
        temperature: Sampling temperature.
        max_tokens: Upper bound on generated tokens.
        stream: When True, echoes the reasoning/answer streams to stdout
            as they arrive and returns only the final answer text.

    Returns:
        The model's final answer as a string.

    Raises:
        RuntimeError: If the API call fails; the original cause is chained.
    """
    try:
        messages = [{'role': 'user', 'content': prompt}]

        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=0.6,
            stream=stream
        )

        if not stream:
            # Non-streaming: the full answer is available immediately.
            return completion.choices[0].message.content

        reasoning_content = ""  # accumulated reasoning text
        answer_content = ""     # accumulated final answer text
        is_answering = False    # True once the answer phase has started

        print("\n" + "=" * 20 + "思考过程" + "=" * 20 + "\n")

        for chunk in completion:
            # Chunks without choices may carry usage statistics.
            if not chunk.choices:
                if hasattr(chunk, 'usage'):
                    print("\nUsage:")
                    print(chunk.usage)
                continue

            delta = chunk.choices[0].delta

            if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
                # Reasoning stream (only emitted by reasoning models).
                print(delta.reasoning_content, end='', flush=True)
                reasoning_content += delta.reasoning_content
            elif hasattr(delta, 'content') and delta.content is not None:
                if not is_answering:
                    # First answer token: print the separator exactly once.
                    print("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
                    is_answering = True
                print(delta.content, end='', flush=True)
                answer_content += delta.content

        print("\n")
        return answer_content

    except Exception as e:
        # RuntimeError is an Exception subclass, so existing callers that
        # catch Exception still work; `from e` preserves the root cause.
        raise RuntimeError(f"调用失败: {str(e)}") from e


# ============================== DeepSeek embedding implementation ==============================
class DeepSeekEmbeddings:
    """Minimal embeddings adapter compatible with LangChain's embeddings interface.

    Posts batches of texts to an Aliyun/DashScope-style embedding endpoint and
    returns the raw embedding vectors.
    """

    def __init__(self, api_key, api_base=None, model="deepseek-embedding-v1"):
        self.api_key = api_key
        # NOTE(review): EMBEDDING_API_BASE_URL is not defined anywhere in this
        # file — calling this with api_base=None raises NameError. Define the
        # constant or always pass api_base explicitly.
        self.api_base = api_base or EMBEDDING_API_BASE_URL
        self.model = model

    def embed_query(self, text):
        """Embed a single text; returns one vector."""
        return self.embed_documents([text])[0]

    def embed_documents(self, texts):
        """Embed a batch of texts; returns a list of vectors."""
        return self._embed(texts)

    def _embed(self, texts):
        """POST the batch to the embedding API and unpack the response.

        Raises:
            RuntimeError: On any transport/HTTP/format failure (cause chained).
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        payload = {
            "model": self.model,
            "texts": texts  # parameter name expected by the Aliyun embedding API
        }
        try:
            # `json=` lets requests handle serialization and encoding itself
            # (was `data=json.dumps(payload)`).
            response = requests.post(
                self.api_base,
                headers=headers,
                json=payload,
                timeout=30
            )
            response.raise_for_status()
            resp = response.json()
            # Unpack the Aliyun-style response envelope.
            if "output" in resp and "embeddings" in resp["output"]:
                return resp["output"]["embeddings"]
            raise ValueError(f"嵌入响应格式未知: {resp}")
        except Exception as e:
            # Chain the cause so transport vs. format errors stay diagnosable;
            # RuntimeError keeps compatibility with callers catching Exception.
            raise RuntimeError(f"嵌入调用失败: {str(e)}") from e


# The modules below (knowledge base, question generation, exam flow) are wired together by ExamAgent.
# ============================== Knowledge base processing ==============================
class KnowledgeBaseProcessor:
    """Loads TXT/DOCX knowledge files, extracts TF-IDF keywords, and builds
    (and caches to disk) a FAISS vector store over the chunked documents."""

    def __init__(self, file_paths=None):
        # Accept one or more file/directory paths.
        if file_paths is None:
            file_paths = ["knowledge.txt"]  # default knowledge file
        self.file_paths = file_paths
        # Derive a cache id from the basenames so each path set gets its own cache files.
        cache_id = "_".join([os.path.basename(p) for p in file_paths])
        self.vector_cache_path = f"knowledge_vector_cache_{cache_id}.faiss"
        self.keywords_cache_path = f"keywords_cache_{cache_id}.json"
        self.vector_store = None  # FAISS index; populated by process_knowledge_base()
        self.keywords = []        # TF-IDF keywords; populated by process_knowledge_base()
        self.stop_words = self._load_stop_words()

    def _load_stop_words(self):
        """Return the (hard-coded) Chinese stop-word set used by the tokenizer."""
        return set(["的", "了", "在", "是", "我", "有", "和", "就", "不", "人", "都", "一个", "也", "要"])

    def _clean_text(self, text):
        """Normalize whitespace and strip punctuation from *text*."""
        text = re.sub(r'\s+', ' ', text)  # collapse runs of whitespace
        text = re.sub(r'[^\w\s]', '', text)  # drop punctuation (\w keeps CJK word chars)
        return text.strip()

    def _chinese_tokenizer(self, text):
        """Segment Chinese text with jieba, dropping stop words and 1-char tokens."""
        words = jieba.cut(text)
        return [word for word in words if word not in self.stop_words and len(word) > 1]

    def _extract_keywords(self, docs):
        """Extract up to 100 TF-IDF features; return the deduplicated union of
        each document's top-10 keywords with weight > 0.01."""
        vectorizer = TfidfVectorizer(tokenizer=self._chinese_tokenizer, max_features=100, token_pattern=None)
        tfidf_matrix = vectorizer.fit_transform(docs)

        # Feature vocabulary in column order
        feature_names = vectorizer.get_feature_names_out()

        # Collect per-document keywords ranked by TF-IDF weight (descending)
        all_keywords = []
        for i in range(len(docs)):
            doc_vector = tfidf_matrix[i]  # 1 x n_features sparse row
            sorted_indices = np.argsort(doc_vector.toarray()).flatten()[::-1]
            doc_keywords = [feature_names[idx] for idx in sorted_indices[:10] if doc_vector[0, idx] > 0.01]
            all_keywords.extend(doc_keywords)

        # Deduplicate across documents (order is not preserved)
        return list(set(all_keywords))

    def process_knowledge_base(self):
        """Build — or load from cache — the vector store and keyword list.

        Raises:
            ValueError: If none of the configured paths yields any document.
        """
        # Fast path: reuse cached FAISS index + keywords when both files exist.
        if os.path.exists(self.vector_cache_path) and os.path.exists(self.keywords_cache_path):
            # NOTE(review): API_KEY and EMBEDDING_API_BASE_URL are not defined
            # in this file — this raises NameError unless they exist elsewhere.
            embeddings = DeepSeekEmbeddings(api_key=API_KEY, api_base=EMBEDDING_API_BASE_URL)

            self.vector_store = FAISS.load_local(self.vector_cache_path, embeddings,
                                                 allow_dangerous_deserialization=True)
            with open(self.keywords_cache_path, 'r', encoding='utf-8') as f:
                self.keywords = json.load(f)
            print(f"从缓存加载知识库: {self.vector_cache_path}")
            return

        # Load knowledge documents (TXT and DOCX are supported).
        documents = []
        for path in self.file_paths:
            if os.path.isdir(path):
                # Recursively load all text files under the directory
                txt_loader = DirectoryLoader(path, glob="**/*.txt", loader_cls=TextLoader)
                txt_docs = txt_loader.load()

                # Recursively load all DOCX files under the directory
                docx_loader = DirectoryLoader(path, glob="**/*.docx", loader_cls=Docx2txtLoader)
                docx_docs = docx_loader.load()

                documents.extend(txt_docs)
                documents.extend(docx_docs)
                print(f"加载目录: {path}, 包含 {len(txt_docs)} 个TXT文档, {len(docx_docs)} 个DOCX文档")
            elif os.path.isfile(path):
                # Pick a loader from the file extension
                if path.lower().endswith('.txt'):
                    loader = TextLoader(path)
                    documents.extend(loader.load())
                    print(f"加载TXT文件: {path}")
                elif path.lower().endswith('.docx'):
                    loader = Docx2txtLoader(path)
                    documents.extend(loader.load())
                    print(f"加载DOCX文件: {path}")
                else:
                    print(f"警告: 不支持的文件类型 - {path}")
            else:
                print(f"警告: 路径不存在 - {path}")

        if not documents:
            raise ValueError("未找到任何知识库文件")

        # Chunk documents for embedding
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
        docs = text_splitter.split_documents(documents)

        # Keyword extraction runs on cleaned plain text
        text_contents = [self._clean_text(doc.page_content) for doc in docs]
        self.keywords = self._extract_keywords(text_contents)
        print(f"提取关键词: {len(self.keywords)} 个")

        # Embed chunks and build the FAISS index
        embeddings = DeepSeekEmbeddings(api_key=API_KEY, api_base=EMBEDDING_API_BASE_URL)
        self.vector_store = FAISS.from_documents(docs, embeddings)

        # Persist both caches for the next run
        self.vector_store.save_local(self.vector_cache_path)
        with open(self.keywords_cache_path, 'w', encoding='utf-8') as f:
            json.dump(self.keywords, f, ensure_ascii=False)
        print(f"知识库处理完成，缓存保存至: {self.vector_cache_path}")


# ============================== Question generation ==============================
class QuestionGenerator:
    """Generates open-ended exam questions grounded in the knowledge-base
    keywords, with forbidden-content and near-duplicate filtering plus a
    backup question bank for failures."""

    def __init__(self, knowledge_processor):
        self.knowledge_processor = knowledge_processor
        self.backup_questions = self._generate_backup_questions()
        # Questions referencing figures/code cannot be answered in plain text.
        self.forbidden_phrases = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]

    def _generate_backup_questions(self):
        """Return the fallback question bank used when generation fails."""
        return [
            "请解释主要概念",
            "描述关键原理",
            "说明核心过程",
            "列举重要特征",
            "分析主要影响因素"
        ]

    def _contains_forbidden_phrase(self, question):
        """True if *question* contains any forbidden phrase."""
        return any(phrase in question for phrase in self.forbidden_phrases)

    def _semantic_similarity(self, new_question, existing_questions):
        """Max cosine similarity between *new_question* and prior questions (0 if none)."""
        if not existing_questions:
            return 0

        embeddings = DeepSeekEmbeddings(api_key=API_KEY, api_base=EMBEDDING_API_BASE_URL)

        new_vector = embeddings.embed_query(new_question)
        existing_vectors = [embeddings.embed_query(q) for q in existing_questions]

        similarities = cosine_similarity([new_vector], existing_vectors)[0]
        return max(similarities) if similarities.size > 0 else 0

    def generate_question(self, existing_questions=None):
        """Generate one exam question distinct from *existing_questions*.

        Args:
            existing_questions: Previously generated questions, used both in
                the prompt and for the similarity filter. Defaults to no
                history. (Was a mutable default ``[]`` — replaced with a
                ``None`` sentinel to avoid state shared across calls.)

        Returns:
            A question string; falls back to the backup bank after repeated
            failures or an API error.
        """
        if existing_questions is None:
            existing_questions = []

        prompt_template = """你是一个专业的题目生成助手，请基于以下知识库关键词生成考试题目。
请生成一个考试题目，要求：
1. 必须包含至少一个以下关键词：{keywords}
2. 避免使用任何图像或代码相关描述
3. 题目应为开放式问题
4. 不要包含答案提示

当前已生成题目：{existing_questions_str}"""

        max_attempts = 5
        for attempt in range(max_attempts):
            # Sample up to 3 keywords (without replacement) to seed the prompt.
            selected_keywords = np.random.choice(
                self.knowledge_processor.keywords,
                size=min(3, len(self.knowledge_processor.keywords)),
                replace=False
            )

            prompt = prompt_template.format(
                keywords=", ".join(selected_keywords),
                existing_questions_str="\n".join(existing_questions) if existing_questions else "无"
            )

            # Stream the generation so the model's reasoning is visible.
            try:
                question = call_deepseek_api(
                    prompt=prompt,
                    model="deepseek-v3",
                    stream=True
                )
            except Exception as e:
                print(f"题目生成失败: {str(e)}")
                # API failure: fall back to the backup bank immediately.
                return np.random.choice(self.backup_questions)

            # Reject questions referencing figures/code.
            if self._contains_forbidden_phrase(question):
                continue

            # Reject near-duplicates of earlier questions.
            similarity = self._semantic_similarity(question, existing_questions)
            if similarity < 0.7:
                return question.strip()

        # Every attempt was rejected: use the backup bank.
        return np.random.choice(self.backup_questions)


# ============================== Exam interaction ==============================
class ExamController:
    """Runs the interactive 5-question exam loop and records the results."""

    def __init__(self, question_generator):
        self.question_generator = question_generator
        self.abnormal_responses = ["不会", "没学", "不懂", "不知道", "乱答", "跳过", "放弃"]
        self.exam_history = []  # one record dict per completed exam

    def _is_abnormal_response(self, answer, question):
        """Heuristically detect low-effort or evasive answers."""
        # Too short to be a real answer
        if len(answer) < 10:
            return True

        # Contains a give-up phrase
        for phrase in self.abnormal_responses:
            if phrase in answer:
                return True

        # Answer is just the question copied back
        if answer.strip() == question.strip():
            return True

        # Keyword stuffing: mentions keywords but has almost no substance
        kb_keywords = self.question_generator.knowledge_processor.keywords
        hits = sum(1 for kw in kb_keywords if kw in answer)
        if hits > 0 and len(answer) < 20:
            return True

        return False

    def conduct_exam(self):
        """Run one full exam (5 questions, 10 points each); return the total score."""
        exam_questions = []
        user_answers = []
        scores = []
        feedbacks = []

        print("===== 考试开始 =====")
        print("本次考试共 5 题，满分 50 分\n")

        for idx in range(5):
            # Generate the next question, avoiding duplicates of earlier ones
            question = self.question_generator.generate_question(exam_questions)
            exam_questions.append(question)

            print(f"\n题目 {idx + 1}/5: {question}")
            print("请回答（输入空行结束作答）：")

            # Read lines until a blank line terminates the answer
            collected = []
            while True:
                line = input()
                if not line.strip():
                    break
                collected.append(line)

            answer = "\n".join(collected)
            user_answers.append(answer)

            # Abnormal answers score 0 and skip the real scorer
            if self._is_abnormal_response(answer, question):
                print("检测到非正常答题行为，本题得0分")
                scores.append(0)
                feedbacks.append("非正常答题行为")
                continue

            # Normal path: keyword-based scoring
            score, feedback = AnswerScorer(
                question, answer,
                self.question_generator.knowledge_processor.keywords
            ).score_answer()
            scores.append(score)
            feedbacks.append(feedback)

            print(f"\n本题得分：{score}/10")
            print(f"反馈：{feedback}")

        total_score = sum(scores)
        print("\n===== 考试结束 =====")
        print(f"总分：{total_score}/50")

        # Per-question recap
        for idx, (q, a, s, fb) in enumerate(zip(exam_questions, user_answers, scores, feedbacks), start=1):
            print(f"\n题目 {idx}: {q}")
            print(f"你的回答：{a}")
            print(f"得分：{s}/10 - 反馈：{fb}")

        # Persist the record for the history view
        self.exam_history.append({
            "questions": exam_questions,
            "answers": user_answers,
            "scores": scores,
            "total_score": total_score
        })

        return total_score


# ============================== Scoring & feedback ==============================
class AnswerScorer:
    """Keyword-coverage scorer.

    Core keywords (those appearing verbatim in the question) are worth 4
    points each; other knowledge-base keywords 1 point each; total capped at 10.
    """

    def __init__(self, question, answer, keywords):
        self.question = question
        self.answer = answer
        self.keywords = keywords
        self.core_keywords = self._identify_core_keywords()

    def _identify_core_keywords(self):
        """Keywords that occur verbatim in the question text."""
        return [kw for kw in self.keywords if kw in self.question]

    def _score_keywords(self):
        """Weighted keyword-coverage score, capped at 10."""
        # 4 points per core keyword present in the answer
        core_score = 4 * sum(1 for kw in self.core_keywords if kw in self.answer)

        # 1 point per non-core keyword present in the answer
        extended_score = sum(
            1 for kw in self.keywords
            if kw not in self.core_keywords and kw in self.answer
        )

        return min(10, core_score + extended_score)

    def _generate_feedback(self, score):
        """Human-readable feedback derived from the score and missed core keywords."""
        if score >= 8:
            feedback = "回答全面，涵盖了主要概念"
        elif score >= 5:
            feedback = "回答基本正确，但可以更深入"
        else:
            feedback = "回答不完整，需要补充关键知识点"

        missing_core = [kw for kw in self.core_keywords if kw not in self.answer]
        if missing_core:
            feedback += f"。未涉及的核心概念：{', '.join(missing_core)}"

        return feedback

    def score_answer(self):
        """Return (score, feedback) for the stored answer."""
        keyword_score = self._score_keywords()
        final_score = min(10, keyword_score)  # defensive cap (already bounded)
        return final_score, self._generate_feedback(final_score)


# ============================== Agent / main menu ==============================
class ExamAgent:
    """Top-level menu tying together the knowledge base, question generator,
    and exam controller."""

    def __init__(self, knowledge_processor):
        self.knowledge_processor = knowledge_processor
        self.question_generator = QuestionGenerator(knowledge_processor)
        self.exam_controller = ExamController(self.question_generator)
        self.user_profile = {}  # reserved for per-user state (currently unused)

    def start_exam(self):
        """Show the main menu and dispatch the chosen action.

        The previous implementation re-invoked ``start_exam`` recursively on
        invalid input and from ``show_exam_history``, so repeated interaction
        grew the call stack without bound; this loop keeps the stack flat and
        also redisplays the menu after viewing an empty history (the old code
        silently fell out of the program in that case).
        """
        while True:
            print("===== 智能考试系统 =====")
            print("1. 开始新考试")
            print("2. 查看历史成绩")
            print("3. 退出系统")

            choice = input("请选择操作: ")

            if choice == "1":
                print("\n===== 新考试开始 =====")
                self.exam_controller.conduct_exam()
                return
            elif choice == "2":
                # Returns here afterwards; the loop redisplays the menu.
                self.show_exam_history()
            elif choice == "3":
                print("系统退出")
                exit()
            else:
                print("无效选择，请重新输入")

    def show_exam_history(self):
        """Print all stored exam records, then return to the caller's menu loop."""
        if not self.exam_controller.exam_history:
            print("\n暂无历史考试记录")
            return

        print("\n===== 历史考试记录 =====")
        for i, exam in enumerate(self.exam_controller.exam_history, 1):
            print(f"\n考试 {i}: 总分 {exam['total_score']}/50")
            for j in range(5):
                print(f"  题目 {j + 1}: {exam['questions'][j]}")
                print(f"    得分: {exam['scores'][j]}/10")

        input("\n按回车键返回主菜单...")


# ============================== Main program ==============================
if __name__ == "__main__":
    # Knowledge sources; raw string keeps the backslashes in the Windows path intact.
    file_paths = [
        r"D:\桌面\第3章 数据获取.docx",
    ]

    # Build (or load from cache) the vector store and keyword list
    kb_processor = KnowledgeBaseProcessor(file_paths)
    kb_processor.process_knowledge_base()

    # Wire up the agent over the processed knowledge base
    exam_agent = ExamAgent(kb_processor)

    # Launch the interactive menu
    exam_agent.start_exam()