from typing import Generator
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from mapper.vectorstore_mapper import initialize_vectorstore
import os

# Shared chat model used by answer_question. Streaming is enabled so tokens
# can be yielded to the client incrementally. Talks to Alibaba DashScope via
# its OpenAI-compatible endpoint.
llm = ChatOpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),  # NOTE(review): None when the env var is unset — confirm deployment env provides it
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    model="qwen-plus",
    temperature=0,  # deterministic answers for policy/regulation Q&A
    streaming=True
)


def load_and_process_document(file, filename):
    """Split an uploaded document into overlapping chunks tagged with its filename.

    :param file: raw file content as UTF-8 ``bytes``; a ``str`` (already
                 decoded text) is also accepted and used as-is — a
                 backward-compatible generalization of the bytes-only original
    :param filename: original file name, stored in each chunk's metadata so
                     retrieval can later be filtered per document
    :return: list of ``Document`` chunks, or an empty list on any processing
             error (deliberate best-effort contract: callers treat ``[]`` as
             "nothing usable" rather than handling exceptions)
    """
    try:
        # Accept both raw bytes (the original contract) and decoded text.
        if isinstance(file, (bytes, bytearray)):
            content = file.decode("utf-8")
        else:
            content = file
        # Tag the source document with its filename for later filtered retrieval.
        document = Document(page_content=content, metadata={"filename": filename})

        # 500-char chunks with 100-char overlap preserve context across
        # chunk boundaries during retrieval.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=100
        )
        splits = text_splitter.split_documents([document])
        # Defensive: guarantee every chunk carries the filename even if the
        # splitter ever drops or rewrites metadata.
        for split in splits:
            split.metadata["filename"] = filename
        return splits
    except Exception as e:
        # Best-effort by design: log and return [] instead of raising.
        print(f"文档处理出错: {e}")
        return []


def process_uploaded_file(file, filename):
    """Process an uploaded file into a vector store plus its chunked documents.

    :param file: raw uploaded file content
    :param filename: original file name, propagated into chunk metadata
    :return: ``(vectorstore, chunks)`` on success, ``(None, [])`` when the
             file could not be loaded or split
    """
    chunks = load_and_process_document(file, filename)
    if chunks:
        return initialize_vectorstore(chunks), chunks
    return None, []


def answer_question(vectorstore, question, target_filename=None) -> Generator[dict, None, None]:
    """Stream an answer to *question*, grounded in retrieved document chunks.

    Yields dicts in this order:
      1. ``{"type": "retrieved_chunks", "data": [...]}`` — the chunks used as
         context, emitted first so a UI can show sources while the answer streams
      2. ``{"type": "message", "data": str}`` — incremental answer fragments
    On failure at any stage a single ``{"type": "error", "data": str}`` is
    yielded and the generator stops.

    :param vectorstore: vector store instance used for similarity retrieval
    :param question: the user's question text
    :param target_filename: optional file name; restricts retrieval to chunks
                            from that document via a metadata filter
    """
    if not vectorstore:
        yield {"type": "error", "data": "向量存储未初始化"}
        return

    # 1. Build retrieval kwargs (optionally filtered by source file name).
    search_kwargs = {"k": 3}
    if target_filename:
        search_kwargs["filter"] = {"filename": target_filename}

    # 2. Similarity search against the vector store.
    try:
        retrieved_docs = vectorstore.similarity_search(
            query=question, **search_kwargs
        )
    except Exception as e:
        yield {"type": "error", "data": f"向量检索失败: {str(e)}"}
        return

    # 3. Surface the retrieved chunks before generating the answer.
    yield {
        "type": "retrieved_chunks",
        "data": [
            {
                "id": idx,
                "filename": doc.metadata.get("filename", "未知文档"),
                "content": doc.page_content,
                "metadata": doc.metadata,
            }
            for idx, doc in enumerate(retrieved_docs)
        ],
    }

    # 4. Build the grounded prompt from the retrieved context.
    context = "\n\n".join(doc.page_content for doc in retrieved_docs)
    prompt = ChatPromptTemplate.from_messages([
        ("system", """你是专业的企业制度解答助手。
请严格根据以下文档内容回答问题，若文档中没有相关信息，请如实说明。
文档内容: {context}"""),
        ("user", "{question}")
    ])
    formatted_prompt = prompt.format(context=context, question=question)

    # 5. Stream the model's answer token-by-token.
    try:
        for chunk in llm.stream(formatted_prompt):
            # Some providers emit empty deltas; skip them so consumers only
            # ever receive non-empty message fragments (final text unchanged).
            if chunk.content:
                yield {"type": "message", "data": chunk.content}
    except Exception as e:
        yield {"type": "error", "data": f"生成回答失败: {str(e)}"}
