import asyncio
import os
from typing import AsyncIterator, Dict, List, TypedDict

from langchain_chroma import Chroma
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import Graph


# Initialize the LLM, vector store, and retriever used by the graph nodes.
# NOTE(security): never commit API keys to source control; read from the
# environment instead (set DASHSCOPE_API_KEY before running).
llm = ChatOpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Model name is configurable; full list:
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="llama-4-scout-17b-16e-instruct",
    # other params...
)
# Chroma collection persisted on disk, embedded with DashScope embeddings.
vector_store = Chroma(
    embedding_function=DashScopeEmbeddings(),
    persist_directory="chroma_db",
)
retriever = vector_store.as_retriever()
class GraphState(TypedDict):
    """State dict passed between the graph's nodes."""
    # The user's query text.
    question: str
    # Documents produced by the retrieval step (presumably langchain
    # Document objects with .page_content/.metadata — confirm with retriever).
    documents: List
    # The LLM-generated answer (a partial chunk while streaming).
    answer: str


async def retrieve(state: GraphState) -> Dict:
    """Fetch documents relevant to the question and pass them downstream.

    Returns the state dict the ``generate`` node expects: the original
    question plus the retrieved documents under ``"documents"``.
    """
    # BUG FIX: similarity_search() is synchronous — awaiting its list result
    # raises TypeError. Use the async variant so the event loop isn't blocked.
    retrieved_docs = await vector_store.asimilarity_search(state["question"], k=1)
    # BUG FIX: the node must return a state dict (the declared -> Dict), not a
    # (serialized, docs) tuple — generate() reads state["documents"] and
    # state["question"]. The serialized string was never consumed downstream.
    return {"question": state["question"], "documents": retrieved_docs}


async def generate(state: GraphState) -> AsyncIterator[Dict]:
    """Stream the LLM's answer for the question, grounded in the documents.

    Yields one dict per streamed chunk, then a final dict carrying the
    complete answer together with ``is_final=True``.
    """
    context = "\n\n".join(doc.page_content for doc in state["documents"])
    prompt = HumanMessage(
        content=f"Context:\n{context}\n\nQuestion: {state['question']}"
    )

    pieces: List[str] = []
    async for part in llm.astream([prompt]):
        pieces.append(part.content)
        yield {"answer": part.content}

    # Emit the accumulated answer once streaming completes.
    yield {"answer": "".join(pieces), "is_final": True}


# Build the workflow graph: retrieve -> generate.
# Wire the two nodes into a linear graph; retrieval output becomes the
# input state of the generation node.
workflow = Graph()
workflow.add_node("retrieve", retrieve)
workflow.add_node("generate", generate)
workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "generate")
app = workflow.compile()


# Streaming execution
async def run(query: str):
    """Stream the answer for *query* to stdout as it is generated."""
    print("Q:", query)
    print("A:", end=" ")
    async for output in app.astream({"question": query}):
        if "generate" in output:
            event = output["generate"]
            if event.get("is_final", False):
                # BUG FIX: the final event repeats the full answer already
                # printed chunk-by-chunk; printing it again duplicated the
                # whole answer. Just terminate the line here.
                print("\n")
            else:
                print(event["answer"], end="", flush=True)


# Example queries
# Sample questions (in Chinese): "What is LangChain?" / "How does RAG work?"
queries = [
    "什么是LangChain?",
    "RAG是如何工作的?"
]


async def main():
    """Run each sample query sequentially, streaming answers to stdout."""
    for query in queries:
        await run(query)


if __name__ == "__main__":
    # Guard the entry point so importing this module does not fire network
    # calls as a side effect.
    asyncio.run(main())