# ai_client.py
import asyncio
import os
from operator import itemgetter

from langchain.text_splitter import CharacterTextSplitter
from langchain_community.chat_models import ChatTongyi
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader, TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_core.runnables.history import RunnableWithMessageHistory

import config
from message_history.async_pg_history import AsyncPostgresChatMessageHistory

# ========== Credentials & model ==========
# Propagate the DashScope API key through the environment so the
# langchain_community DashScope clients (LLM + embeddings) pick it up.
if config.DASHSCOPE_API_KEY:
    os.environ["DASHSCOPE_API_KEY"] = config.DASHSCOPE_API_KEY

# Chat model driving every answer.
llm = ChatTongyi(model=config.MODEL, temperature=config.TEMPERATURE)

# ========== Load source documents ==========
def _load_docs(glob_pattern, loader_cls):
    """Load every file under config.DOC_DIR matching *glob_pattern*.

    Uses *loader_cls* per file; per-file failures are skipped
    (silent_errors=True) so one bad file cannot abort the whole batch.
    Returns a (possibly empty) list of documents.
    """
    loader = DirectoryLoader(
        config.DOC_DIR,
        glob=glob_pattern,
        loader_cls=loader_cls,
        show_progress=True,
        use_multithreading=True,
        silent_errors=True,
    )
    return loader.load() or []


all_docs = []
try:
    # .txt and .md are plain text; PDFs need their own loader.
    all_docs = (
        _load_docs("**/*.txt", TextLoader)
        + _load_docs("**/*.md", TextLoader)
        + _load_docs("**/*.pdf", PyPDFLoader)
    )
except Exception as e:
    # Best effort: fall back to an empty corpus so the app still runs LLM-only.
    print(f"⚠️ 文档加载失败: {e}")
    all_docs = []

# Chunking: split on blank lines into ~500-char pieces with 80-char overlap.
text_splitter = CharacterTextSplitter(separator="\n\n", chunk_size=500, chunk_overlap=80)
if all_docs:
    texts = text_splitter.split_documents(all_docs)
else:
    texts = []

# DashScope embedding model, used both for indexing and for query-time lookup.
embeddings = DashScopeEmbeddings(model="text-embedding-v2")

# ========== Vector store (Chroma) ==========
if texts:
    # Fresh corpus this run: (re)build the index and persist it to disk.
    vectorstore = Chroma.from_documents(
        documents=texts,
        embedding=embeddings,
        persist_directory=config.EMBEDDINGS_DIR,
    )
    vectorstore.persist()
else:
    # No documents loaded: try to reopen a previously persisted index.
    try:
        vectorstore = Chroma(
            persist_directory=config.EMBEDDINGS_DIR,
            embedding_function=embeddings,
        )
    except Exception:
        print("⚠️ 无文档且无法加载向量库，将使用纯 LLM 模式（无检索）")
        vectorstore = None

# ========== Retriever ==========
if vectorstore is not None:
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
else:
    # BUG FIX: a bare lambda is not a Runnable, so composing it with
    # `itemgetter("input") | retriever` when building the RAG chain would
    # raise TypeError in LLM-only mode. RunnableLambda makes the no-op
    # fallback participate in the | pipeline.
    retriever = RunnableLambda(lambda query: [])

# Flatten retrieved documents into a single context string.
def format_docs(docs):
    """Join the page contents of *docs* with blank-line separators."""
    bodies = [doc.page_content for doc in docs]
    return "\n\n".join(bodies)

# ========== Prompt ==========
# System instructions carry the retrieved context; prior turns are injected
# via the "chat_history" placeholder; the current question is the human turn.
_SYSTEM_PROMPT = (
    "你是一名资深 Java 开发专家，精通 JDK 8-17、Spring 生态、并发、JVM 调优等。\n"
    "请基于以下检索到的上下文回答用户的问题。\n"
    "如果上下文不相关，请仅基于你的知识回答，不要编造。\n\n"
    "上下文：\n{context}"
)

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", _SYSTEM_PROMPT),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
    ]
)

# ========== RAG chain ==========
# Extract the question, retrieve matching chunks, flatten them to text,
# then run prompt -> LLM -> plain string.
_context_chain = itemgetter("input") | retriever | format_docs

rag_chain = (
    RunnablePassthrough.assign(context=_context_chain)
    | prompt
    | llm
    | StrOutputParser()
)

# Factory used by RunnableWithMessageHistory to fetch per-session history.
def get_session_history(session_id: str):
    """Return the PostgreSQL-backed message history for *session_id*."""
    history = AsyncPostgresChatMessageHistory(session_id=session_id)
    return history

# ========== Full chain with memory ==========
# Wraps the RAG chain so that each invocation loads prior turns into
# "chat_history" and appends the new exchange, keyed by the session_id
# supplied in the call's config ({"configurable": {"session_id": ...}}).
rag_with_history = RunnableWithMessageHistory(
    rag_chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history"
)

# ========== Async conversation demo ==========
async def run_conversation():
    """Run a scripted three-turn demo conversation against the RAG chain.

    Each answer is streamed chunk-by-chunk to stdout. All turns share one
    session id, so later questions can refer back to earlier answers, and
    the history is persisted to PostgreSQL via get_session_history.
    """
    session_id = "user_session_1"
    run_config = {"configurable": {"session_id": session_id}}

    async def _ask(question: str, *, first: bool = False) -> None:
        """Print *question*, then stream the model's answer to stdout."""
        # Only turns after the first get a leading blank line (matches the
        # original transcript spacing exactly).
        prefix = "" if first else "\n"
        print(f"{prefix}👤 用户: {question}")
        print("🤖 AI: ", end="", flush=True)
        async for chunk in rag_with_history.astream(
            {"input": question},
            config=run_config
        ):
            print(chunk, end="", flush=True)
        print("\n")

    print("💬 欢迎使用 Java 专家问答系统（PostgreSQL 多轮对话）\n")

    await _ask("怎么自定义线程池,不用给我代码，简单说不超过200个字", first=True)
    await _ask("那核心线程数一般设多少？")
    await _ask("如果任务很多，队列会满吗？")

    print("\n✅ 多轮对话测试完成！对话已存入 PostgreSQL。")

# ========== Main entry point ==========
if __name__ == "__main__":
    # Block until the scripted demo conversation completes.
    asyncio.run(run_conversation())