import os
import chainlit as cl
import jieba
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from langchain_community.document_loaders import Docx2txtLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from openai import OpenAI
from typing import List, Dict, Tuple, Optional
import time

# Environment configuration and knowledge-base file paths.
# Force HuggingFace libraries into offline mode so loading the local
# embedding model never triggers network access.
os.environ['TRANSFORMERS_OFFLINE'] = '1'
os.environ['HF_DATASETS_OFFLINE'] = '1'
# Source documents for the knowledge base (Windows absolute paths).
KNOWLEDGE_FILES = [
    "D:\\桌面\\第3章 数据获取.docx",
    "D:\\桌面\\大数据应用基础教程.txt"
]
# Directory where the FAISS index is cached between runs.
VECTOR_STORE_PATH = "./vector_store"
# Exam layout: number of questions and the total score across all of them.
TOTAL_QUESTIONS = 5
TOTAL_SCORE = 50


# --- Core logic modules ---

class KnowledgeBaseProcessor:
    """Knowledge-base pipeline: loads the source documents, builds or loads a
    cached FAISS vector store, and extracts TF-IDF keywords used for question
    generation."""

    def __init__(self, knowledge_files: Optional[List[str]] = None):
        # Copy the caller's list (or the module default) instead of binding the
        # shared module-level list as a mutable default argument.
        if knowledge_files is None:
            knowledge_files = KNOWLEDGE_FILES
        self.knowledge_files = list(knowledge_files)
        self.vector_store = None   # FAISS index, created lazily
        self.documents = []        # loaded langchain Document objects
        self.keywords = None       # {"core": [...], "extended": [...]} after extract_keywords()
        self.tfidf_vectorizer = TfidfVectorizer(tokenizer=self._jieba_tokenize)

    def _jieba_tokenize(self, text: str) -> List[str]:
        """Tokenize Chinese text with jieba (used as the TF-IDF tokenizer)."""
        return list(jieba.cut(text))

    def load_documents(self) -> None:
        """Load every configured file into self.documents.

        Supports .docx (Docx2txtLoader) and .txt (TextLoader, trying utf-8,
        gbk, then latin-1). Raises FileNotFoundError for missing files,
        RuntimeError when loading/decoding fails, ValueError for unsupported
        formats.
        """
        self.documents = []
        encodings_to_try = ["utf-8", "gbk", "latin-1"]
        for file_path in self.knowledge_files:
            file_docs = []
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"文件不存在：{file_path}")
            file_path = os.path.normpath(file_path)
            print(f"准备加载文件: {file_path}")

            if file_path.lower().endswith(".docx"):
                try:
                    loader = Docx2txtLoader(file_path)
                    file_docs = loader.load()
                    print(f"成功加载docx文件: {file_path}")
                except Exception as e:
                    # RuntimeError (not bare Exception) for consistency with the
                    # txt branch below; still caught by `except Exception` callers.
                    raise RuntimeError(f"加载docx文件失败：{file_path}\n错误信息：{str(e)}")
            elif file_path.lower().endswith(".txt"):
                loaded_successfully = False
                for encoding in encodings_to_try:
                    try:
                        loader = TextLoader(file_path, encoding=encoding)
                        file_docs = loader.load()
                        print(f"成功使用 {encoding} 编码加载文件: {file_path}")
                        loaded_successfully = True
                        break
                    except UnicodeDecodeError as e:
                        # Wrong encoding: report and try the next candidate.
                        print(f"{encoding} 编码加载失败: {str(e)}")
                    except Exception as e:
                        print(f"加载txt文件失败（使用{encoding}编码）：{file_path}\n错误信息：{str(e)}")
                if not loaded_successfully:
                    raise RuntimeError(f"加载txt文件失败：{file_path}\n错误信息：所有尝试的编码均失败。")
            else:
                raise ValueError(f"不支持的文件格式：{file_path}")
            self.documents.extend(file_docs)
            print(f"成功加载文件：{file_path}（{len(file_docs)}个文档对象）")
        print(f"共加载{len(self.documents)}个文档对象")

    def split_documents(self) -> List:
        """Split loaded documents into overlapping ~500-char chunks; separators
        include Chinese punctuation (。，) so splits land on sentence borders."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500, chunk_overlap=50, separators=["\n\n", "\n", "。", "，", " "]
        )
        return text_splitter.split_documents(self.documents)

    def create_vector_store(self) -> None:
        """Create the FAISS vector store, reusing the on-disk cache if present."""
        embeddings = HuggingFaceEmbeddings(
            model_name="D:\\local_models\\all-MiniLM-L6-v2",
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': True}
        )
        if os.path.exists(VECTOR_STORE_PATH) and os.listdir(VECTOR_STORE_PATH):
            # Deserializing the cached index is acceptable only because the
            # cache is produced locally by this same program.
            self.vector_store = FAISS.load_local(
                VECTOR_STORE_PATH, embeddings, allow_dangerous_deserialization=True
            )
            print("已加载缓存的向量存储")
        else:
            splits = self.split_documents()
            self.vector_store = FAISS.from_documents(splits, embeddings)
            self.vector_store.save_local(VECTOR_STORE_PATH)
            print("已创建新的向量存储并缓存")

    def extract_keywords(self) -> None:
        """Rank all terms by total TF-IDF weight across documents; the top 20
        become 'core' keywords and the next 80 'extended'."""
        if not self.documents:
            self.load_documents()
        texts = [" ".join(self._jieba_tokenize(doc.page_content)) for doc in self.documents]
        tfidf_matrix = self.tfidf_vectorizer.fit_transform(texts)
        feature_names = self.tfidf_vectorizer.get_feature_names_out()
        # Accumulating each term's weight over documents is just the sparse
        # column sum -- O(nnz) instead of densifying every document row.
        total_weights = np.asarray(tfidf_matrix.sum(axis=0)).ravel()
        ranked_indices = np.argsort(total_weights)[::-1]
        sorted_terms = [feature_names[i] for i in ranked_indices if total_weights[i] > 0]
        self.keywords = {
            "core": sorted_terms[:20],
            "extended": sorted_terms[20:100]
        }
        print(f"提取关键词：核心{len(self.keywords['core'])}个，扩展{len(self.keywords['extended'])}个")

    def retrieve_relevant_docs(self, query: str, top_k: int = 3) -> List:
        """Return the top_k chunks most similar to *query*, building the
        vector store on first use."""
        if self.vector_store is None:
            self.create_vector_store()
        return self.vector_store.similarity_search(query, k=top_k)


class QuestionGenerator:
    """Generates open-ended exam questions from the knowledge base via an LLM,
    filtering image-related wording, de-duplicating near-identical questions,
    and falling back to a pre-generated backup pool."""

    def __init__(self, knowledge_processor: "KnowledgeBaseProcessor"):
        self.kb_processor = knowledge_processor
        # SECURITY: prefer the DASHSCOPE_API_KEY environment variable over a
        # hard-coded secret. The old literal key is kept only as a
        # backward-compatible fallback -- it should be rotated and removed.
        self.client = OpenAI(
            api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self.generated_questions = []   # questions already issued this session
        self.backup_questions = []      # lazily-filled fallback pool
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the single-question and backup-pool prompt templates
        (template text unchanged; it is the contract with the model)."""
        self.question_prompt = PromptTemplate(
            input_variables=["context", "keywords", "forbidden_words"],
            template="""基于以下上下文和关键词，生成一个问题：
关键词：{keywords}
上下文：{context}
要求：
1. 问题必须包含至少一个关键词
2. 禁止出现这些词：{forbidden_words}
3. 题型是开放性简答题，考察对核心概念的理解
4. 问题清晰无歧义，与上下文紧密相关
5. 每次输出题目时不要输出解析

生成的问题："""
        )
        self.backup_prompt = PromptTemplate(
            input_variables=["context", "keywords"],
            template="""基于以下上下文和关键词，生成10个基础问题：
关键词：{keywords}
上下文：{context}
要求：每个问题包含至少一个关键词，不要编号，每行一个问题，禁止出现"如图"“所示”等图片相关词汇

问题："""
        )

    def _check_forbidden_words(self, text: str) -> bool:
        """Return True if *text* references figures/screenshots/source code,
        which cannot be answered in a text-only exam."""
        forbidden = ["如图", "如图所示", "图例", "源程序", "所示", "源代码", "截图", "示例程序"]
        return any(word in text for word in forbidden)

    def _calculate_similarity(self, question1: str, question2: str) -> float:
        """Cosine similarity of the two questions under a jieba-tokenized
        TF-IDF representation (used to reject near-duplicates)."""
        vectorizer = TfidfVectorizer(tokenizer=self.kb_processor._jieba_tokenize)
        tfidf_matrix = vectorizer.fit_transform([question1, question2])
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def _generate_backup_questions(self):
        """Populate the backup pool once with ~10 simple questions generated
        from the first few documents."""
        if not self.backup_questions:
            context = "\n\n".join([doc.page_content[:1000] for doc in self.kb_processor.documents[:3]])
            keywords_str = ", ".join(self.kb_processor.keywords["core"][:10])
            result = self.get_ai_response(
                prompt=self.backup_prompt.format(context=context, keywords=keywords_str)
            )
            self.backup_questions = [q.strip() for q in result.split("\n") if q.strip()]
            print(f"生成保底题库：{len(self.backup_questions)}题")

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Send *prompt* to the chat model, retrying up to *max_retries* times
        with a 2s pause; raises after the final failure."""
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    timeout=30
                )
                return response.choices[0].message.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                time.sleep(2)
        return ""

    def generate_question(self) -> str:
        """Generate one exam question.

        Tries up to 5 times to produce an LLM question that passes the
        forbidden-word filter and is not more than 0.7 similar to any earlier
        question; otherwise falls back to an unused backup question.
        """
        forbidden_words = ["如图", "如图所示", "图例", "源程序", "源代码", "截图", "示例程序"]
        attempts = 0
        self._generate_backup_questions()
        while attempts < 5:
            try:
                # Seed retrieval with 1-2 random keywords (core + extended).
                keywords = np.random.choice(
                    self.kb_processor.keywords["core"] + self.kb_processor.keywords["extended"],
                    size=np.random.randint(1, 3)
                )
                keyword_str = ", ".join(keywords)
                relevant_docs = self.kb_processor.retrieve_relevant_docs(keyword_str)
                context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])
                prompt_content = self.question_prompt.format(
                    context=context, keywords=keyword_str, forbidden_words=", ".join(forbidden_words)
                )
                question = self.get_ai_response(prompt_content).strip()
                if self._check_forbidden_words(question):
                    attempts += 1
                    continue
                # Accept only if it is not a near-duplicate of a prior question.
                if all(self._calculate_similarity(question, q) <= 0.7 for q in self.generated_questions):
                    self.generated_questions.append(question)
                    return question
            except Exception as e:
                print(f"题目生成失败（尝试{attempts + 1}/5）：{str(e)}")
            attempts += 1
            time.sleep(1)
        # All attempts exhausted: fall back to the backup pool.
        print("使用保底题目")
        for q in self.backup_questions:
            if q not in self.generated_questions and not self._check_forbidden_words(q):
                self.generated_questions.append(q)
                return q
        return np.random.choice(self.backup_questions)


class ScoringFeedbackModule:
    """Scores a user's answer against retrieved knowledge via an LLM and
    returns a (score, feedback) pair."""

    def __init__(self, knowledge_processor):
        self.kb_processor = knowledge_processor
        # SECURITY: prefer the DASHSCOPE_API_KEY environment variable over a
        # hard-coded secret; the literal key remains only as a
        # backward-compatible fallback and should be rotated and removed.
        self.client = OpenAI(
            api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-ce186a5b911341ba983baf0bf1ee2904"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        self._init_prompt_templates()

    def _init_prompt_templates(self):
        """Build the scoring prompt. Its first-line-is-the-score output format
        is what _parse_score_result relies on; template text unchanged."""
        self.scoring_prompt = PromptTemplate(
            input_variables=["question", "user_answer", "context", "keywords", "total_points"],
            template="""作为一个专业的AI考试评分员，请严格按照以下要求对用户答案进行评分和反馈。

**评分背景:**
- **问题:** {question}
- **标准参考知识:** {context}
- **核心关键词:** {keywords}
- **用户答案:** {user_answer}
- **本题满分:** {total_points}分

**评分维度与规则 (总分100%):**
1.  **知识点覆盖度 (权重 60%):**
    - 评估用户答案是否准确、完整地覆盖了核心关键词和参考知识中的关键信息点。
    - 完全覆盖得60%的分数，部分覆盖按比例给分，完全错误不得分。

2.  **逻辑关联性 (权重 30%):**
    - 评估答案的组织结构是否清晰，论述是否条理分明，前后逻辑是否连贯自洽。
    - 逻辑优秀得30%的分数，逻辑混乱或有矛盾扣分。

3.  **格式与表达规范性 (权重 10%):**
    - 评估答案是否使用了恰当的术语，无明显错别字，表达流畅，格式整洁。
    - 规范性好得10%的分数，有较多错别字、表述不清或格式混乱酌情扣分。

**输出要求:**
1.  **第一行必须只输出最终计算出的总分（一个数字，范围0-{total_points}）。**
2.  **从第二行开始，提供详细的反馈，需要包含以下几点：**
    - **[知识点覆盖]:** 指出用户答对了哪些要点，遗漏了哪些要点。
    - **[逻辑关联]:** 评价答案的逻辑结构和条理性。
    - **[格式表达]:** 对答案的格式和语言表达给出建议。
    - **[总结]:** 对整体表现进行总结。

请开始评分。"""
        )

    def get_ai_response(self, prompt: str, max_retries: int = 3) -> str:
        """Send *prompt* to the chat model, retrying up to *max_retries* times
        with a 2s pause; raises after the final failure."""
        retry_count = 0
        while retry_count < max_retries:
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-v3",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    timeout=30
                )
                return response.choices[0].message.content
            except Exception as e:
                retry_count += 1
                if retry_count >= max_retries:
                    raise Exception(f"API请求失败: {str(e)}")
                time.sleep(2)
        return ""

    @staticmethod
    def _parse_score_result(result: str, total_points: int) -> Tuple[int, str]:
        """Parse the model reply: first line holds the score, the rest is the
        feedback text.

        Robust to decoration around the number ('45分', '**45**'): the first
        integer on the score line is used. The score is clamped to
        [0, total_points]. Raises ValueError when no number is present.
        """
        import re  # local import keeps this block self-contained
        score_line, _, feedback = result.partition("\n")
        match = re.search(r"-?\d+", score_line)
        if match is None:
            raise ValueError(f"未能在首行找到分数: {score_line!r}")
        score = int(match.group())
        return max(0, min(total_points, score)), feedback.strip()

    def score_answer(self, question: str, user_answer: str, total_points: int) -> Tuple[int, str]:
        """Score *user_answer* for *question* out of *total_points*.

        Retrieves supporting context, asks the model to grade, and parses the
        (score, feedback) reply; returns (0, error message) if parsing fails.
        """
        relevant_docs = self.kb_processor.retrieve_relevant_docs(question)
        context = "\n\n".join([doc.page_content for doc in relevant_docs[:2]])
        keywords = ", ".join(self.kb_processor.keywords["core"][:10])
        prompt_content = self.scoring_prompt.format(
            question=question, user_answer=user_answer, context=context, keywords=keywords, total_points=total_points
        )
        result = self.get_ai_response(prompt_content)
        try:
            # BUG FIX: int(first_line) broke whenever the model decorated the
            # score (e.g. '45分'); extract the first integer instead.
            return self._parse_score_result(result, total_points)
        except Exception as e:
            return 0, f"评分解析失败: {str(e)}"


def check_abnormal_answer(answer: str, question: str) -> bool:
    """Return True when *answer* looks like a non-serious attempt.

    Heuristics, applied in order: empty input, canned refusal phrases,
    too-short answers (by characters or jieba tokens), and answers that merely
    echo the question. Pure-digit answers are always treated as normal.
    """
    refusal_markers = ("不会", "没学", "不知道", "不懂", "略", "乱答", "随便写", "同上", "无", "null")

    normalized_answer = answer.lower().strip()
    normalized_question = question.lower().strip()

    if not normalized_answer:
        return True
    if normalized_answer.isdigit():
        return False
    if any(marker in normalized_answer for marker in refusal_markers):
        return True

    token_count = len(list(jieba.cut(normalized_answer)))
    if len(normalized_answer) < 10 or token_count <= 2:
        return True

    echoes_question = (
        normalized_question in normalized_answer
        or normalized_answer in normalized_question
    )
    if echoes_question and len(normalized_answer) > 5 and len(normalized_question) > 5:
        return True

    return False


# --- Chainlit UI integration ---

@cl.on_chat_start
async def start_chat():
    """Chainlit session entry point.

    Initializes the knowledge base (documents, vector store, keywords),
    creates the question generator and scoring module, resets the exam state
    in the user session, and sends the first question. Aborts the session
    with an error message if knowledge-base initialization fails.
    """
    kb_processor = KnowledgeBaseProcessor()
    cl.user_session.set("kb_processor", kb_processor)

    await cl.Message(content="正在初始化知识库...").send()
    try:
        # Plain messages are used for progress updates instead of cl.Status.
        await cl.Message(content="加载文档中...", author="系统").send()
        kb_processor.load_documents()

        await cl.Message(content="创建向量存储中...", author="系统").send()
        kb_processor.create_vector_store()

        await cl.Message(content="提取关键词中...", author="系统").send()
        kb_processor.extract_keywords()

        await cl.Message(content="知识库初始化完成。").send()
    except Exception as e:
        await cl.Message(content=f"知识库初始化失败：{str(e)}").send()
        return

    question_generator = QuestionGenerator(kb_processor)
    scoring_module = ScoringFeedbackModule(kb_processor)
    cl.user_session.set("question_generator", question_generator)
    cl.user_session.set("scoring_module", scoring_module)
    cl.user_session.set("current_question_index", 0)
    cl.user_session.set("total_score", 0)
    cl.user_session.set("exam_answers", [])

    await cl.Message(content=f"===== 考试开始 =====").send()
    await cl.Message(content=f"本次考试共{TOTAL_QUESTIONS}题，满分{TOTAL_SCORE}分。").send()
    await cl.Message(content="请在输入框中输入答案，然后直接按回车提交。").send()

    await send_next_question()


@cl.on_message
async def handle_message(message: cl.Message):
    """Handle a user message as the answer to the current exam question.

    Scores the answer (0 for abnormal/non-serious answers), records it in the
    session, shows the score and feedback, and then either sends the next
    question or finishes the exam.
    """
    current_question_index = cl.user_session.get("current_question_index")
    exam_answers = cl.user_session.get("exam_answers", [])

    # A question is pending only if the latest entry has no answer yet.
    if not exam_answers or "answer" in exam_answers[-1]:
        await cl.Message(content="请等待出题，然后再提交答案。").send()
        return

    scoring_module = cl.user_session.get("scoring_module")
    total_score = cl.user_session.get("total_score")

    current_question_data = exam_answers[-1]
    current_question = current_question_data["question"]
    user_answer = message.content

    # BUG FIX: points_per_question was previously assigned only inside the
    # normal-scoring branch, so the score display below raised NameError
    # whenever an abnormal answer was detected. Compute it up front.
    points_per_question = TOTAL_SCORE // TOTAL_QUESTIONS

    if check_abnormal_answer(user_answer, current_question):
        score = 0
        feedback = "检测到非正常答题行为，本题按0分处理。"
    else:
        # Progress notice (plain message used instead of cl.Status).
        await cl.Message(content="正在评分...", author="系统").send()
        score, feedback = scoring_module.score_answer(current_question, user_answer, points_per_question)

    current_question_data["answer"] = user_answer
    current_question_data["score"] = score
    current_question_data["feedback"] = feedback
    total_score += score
    cl.user_session.set("total_score", total_score)
    cl.user_session.set("exam_answers", exam_answers)

    await cl.Message(content=f"**得分**：{score}/{points_per_question}分").send()
    await cl.Message(content=f"**反馈**：\n{feedback}").send()

    if current_question_index < TOTAL_QUESTIONS:
        await send_next_question()
    else:
        await finish_exam()


async def send_next_question():
    """Generate and send the next exam question.

    Advances the question counter in the session and appends a fresh entry to
    the exam record; the entry deliberately has no 'answer' key yet, which is
    how handle_message knows an answer is still expected.
    """
    question_generator = cl.user_session.get("question_generator")
    current_question_index = cl.user_session.get("current_question_index")
    exam_answers = cl.user_session.get("exam_answers")

    # Progress notice (plain message used instead of cl.Status).
    await cl.Message(content=f"正在出第{current_question_index + 1}题...", author="系统").send()
    question = question_generator.generate_question()

    cl.user_session.set("current_question_index", current_question_index + 1)
    # Initialize the new question dict here without an 'answer' key.
    exam_answers.append({"question": question})
    cl.user_session.set("exam_answers", exam_answers)

    await cl.Message(content=f"\n**第{current_question_index + 1}题**：{question}").send()


async def finish_exam():
    """End the exam: announce the total score and send a per-question
    breakdown of answers, scores, and feedback from the session record.
    """
    total_score = cl.user_session.get("total_score")
    exam_answers = cl.user_session.get("exam_answers")

    await cl.Message(content=f"===== 考试结束 =====").send()
    await cl.Message(content=f"您的总得分：{total_score}/{TOTAL_SCORE}").send()

    details = ["\n**===== 答题详情 ===**"]
    for i, item in enumerate(exam_answers, 1):
        details.append(f"\n**第{i}题**：{item['question']}")
        details.append(f"**您的答案**：{item['answer']}")
        details.append(f"**得分**：{item['score']}")
        details.append(f"**反馈**：{item['feedback']}")

    await cl.Message(content="\n".join(details)).send()