import os

# Allow duplicate OpenMP runtimes (common FAISS/PyTorch conflict) — force-set.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# LangSmith tracing configuration. `setdefault` (instead of unconditional
# assignment) lets a deployment override these via the real environment
# without editing source; values are unchanged when the env is unset.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGSMITH_TRACING", "true")
os.environ.setdefault("LANGSMITH_ENDPOINT", "https://api.smith.langchain.com")
# SECURITY: an API key was hard-coded here. Kept only as a fallback for
# backward compatibility — this key should be revoked and supplied through
# the environment (e.g. a .env file or a secret manager) instead.
os.environ.setdefault("LANGSMITH_API_KEY", "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")
os.environ.setdefault("LANGSMITH_PROJECT", "intent_recog")

from fastapi import FastAPI, HTTPException, Request
from configs.config import config
from module.componentsModule import embedding_model, reranker_model
from module.requestModule import LLMResponse, ChatRequest, ChatResponse
import uvicorn
from contextlib import asynccontextmanager
from module.knowledgeModule import km
from langchain_openai import ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain.retrievers import ContextualCompressionRetriever
from langchain_core.output_parsers import JsonOutputParser
from module.promptModule import system_prompt
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.exceptions import OutputParserException
from operator import itemgetter
from typing import List, Dict
from langchain_core.messages import HumanMessage, AIMessage
# Module-level RAG chain; populated once during FastAPI lifespan startup
# (see `lifespan`) and read by the /chat endpoint. None until initialized.
rag_chain= None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: assemble the RAG chain once at startup.

    Startup: loads the CSV knowledge base, builds/loads the FAISS index,
    wires retriever + reranker + prompt + LLM + JSON parser into an LCEL
    chain, and stores it in the module-level `rag_chain` used by /chat.
    Shutdown: logs only.
    """
    global rag_chain

    # Chat model client; temperature 0.0 for deterministic, schema-style output.
    llm = ChatOpenAI(
        api_key=config.models.chat_model_key,
        base_url=config.models.chat_model_url,
        model=config.models.chat_model_name,
        temperature=0.0
    )
    # Load CSV documents and build/refresh the FAISS index on disk.
    csv_docs = await km.csv_loader_path()
    await km.faiss_loader_path(csv_docs, embedding_model)
    # NOTE(review): calling __del__ directly is unusual — presumably it frees
    # the loader's resources; confirm km exposes a proper close()/cleanup API.
    km.__del__()
    
    # Retriever: dense FAISS top-10 search, then reranker-based compression.
    vector_store = FAISS.load_local(config.knowledge.faiss_path, embedding_model, allow_dangerous_deserialization=True)
    base_retriever = vector_store.as_retriever(search_kwargs={"k": 10})
    compression_retriever = ContextualCompressionRetriever(base_compressor=reranker_model, base_retriever=base_retriever)

    # Parser whose format instructions steer the LLM toward LLMResponse JSON.
    parser = JsonOutputParser(pydantic_object=LLMResponse)

    prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{question}"),
    ]).partial(format_instructions=parser.get_format_instructions())
    # 5. Build the RAG chain (LCEL).
    def format_docs(docs: List[Dict]) -> str:
        """Join retrieved docs into one context string (debug-printed with scores)."""
        print("\n\n".join(f"分数: {doc.metadata.get('rerank_score', '未知')}\n 来源: {doc.metadata.get('tool', '未知')}\n内容: {doc.page_content}" for doc in docs))
        
        return "\n\n".join(f"来源: {doc.metadata.get('tool', '未知')}\n 内容: {doc.page_content}" for doc in docs)

    def parse_with_fallback(output) -> dict:
        """Parse model output as JSON; on failure, wrap the raw text in a fallback dict."""
        try:
            # If the output is an AIMessage object, extract its content.
            if hasattr(output, 'content'):
                output_str = output.content
            else:
                output_str = str(output)
            
            return parser.parse(output_str)
        except OutputParserException as e:
            # output_str is always bound here: parser.parse only runs after assignment.
            print(f"JSON OutputParser failed. Raw output: '{output_str}'. Error: {e}")
            return {
                "thinking": "模型未能按要求生成JSON格式的回答。以下是模型的原始输出。",
                "final_answer": str(output_str)
            }

    structured_generation_chain = prompt | llm | RunnableLambda(parse_with_fallback)

    # Gather inputs (retrieved context, question, history) and attach the
    # structured LLM result under the "llm_output" key of the chain output.
    rag_chain = (
        {
            "context": itemgetter("question") | compression_retriever | RunnableLambda(format_docs),
            "question": itemgetter("question"),
            "chat_history": itemgetter("chat_history"),
        }
        | RunnablePassthrough.assign(
            llm_output=structured_generation_chain
          )
    )
    print("组件初始化完成")
    yield
    print("--- 服务关闭。 ---")

# FastAPI application; heavy initialization is deferred to the lifespan handler.
app = FastAPI(title="AI-WIKI-PLUS V1", lifespan=lifespan)

@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """Answer a question through the RAG chain.

    Converts the client-supplied history into LangChain messages, invokes
    the chain, and maps the structured LLM output onto ChatResponse.

    Raises:
        HTTPException 503: lifespan startup has not initialized the chain.
        HTTPException 500: any failure while invoking the chain.
    """
    # Explicit None check: the chain object's truthiness is not a reliable signal.
    if rag_chain is None:
        raise HTTPException(status_code=503, detail="服务未初始化")

    # Rebuild the history as LangChain messages; roles other than
    # user/assistant (e.g. system/tool) are deliberately skipped.
    chat_history_messages = []
    for msg in request.history:
        role = msg.get("role")
        content = msg.get("content") or ""  # guard: missing/None content must not crash
        if role == "user":
            chat_history_messages.append(HumanMessage(content=content))
        elif role == "assistant":
            chat_history_messages.append(AIMessage(content=content))

    input_data = {
        "question": request.question,
        "chat_history": chat_history_messages,
    }
    try:
        result = await rag_chain.ainvoke(input_data)
        print(rag_chain.get_prompts())  # debug: dump the effective prompts
        # The parser fallback may omit keys (e.g. "sources"); default defensively
        # and tolerate a missing/None llm_output instead of raising AttributeError.
        llm_output = result.get("llm_output") or {}
        return ChatResponse(
            answer=llm_output.get("final_answer", ""),
            thinking=llm_output.get("thinking", ""),
            sources=llm_output.get("sources", "")
        )
    except Exception as e:
        print(f"处理请求时发生错误: {e}")
        raise HTTPException(status_code=500, detail=str(e))

# NOTE: "/heath_check" contains a typo. The correctly spelled "/health_check"
# is registered as an alias; the old path is kept so existing probes keep working.
@app.get("/heath_check")
@app.get("/health_check")
async def get_heath_state():
    """Liveness probe: always reports the service as up."""
    return {"status": "ok"}

if __name__ == "__main__":
    # Launch the ASGI server; port and auto-reload come from the project config.
    uvicorn.run(
        "app:app",
        host="0.0.0.0",
        port=config.app.port,
        reload=config.app.auto_reload,
    )
