import os
from typing import Literal

import bs4
from IPython.display import Image, display
from langchain import hub
from langchain_community.chat_models import ChatTongyi
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.constants import START
from langgraph.graph import StateGraph
from pydantic import SecretStr
from typing_extensions import List, TypedDict, Annotated
# Browser-like User-Agent so the target site does not reject the request.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
}

# Web loader: fetch the page whose content will be indexed.
# bs4_strainer = bs4.SoupStrainer(class_=("post-title", "post-header", "post-content"))
loader = WebBaseLoader(
    web_paths=("https://liaoxuefeng.com/books/python/history/index.html",),
    header_template=headers,
    # bs_kwargs={"parse_only": bs4_strainer},
)
docs = loader.load()

# BUG FIX: the original printed the entire page content under the label
# "Total characters"; the intent was clearly the character count.
print(f"Total characters: {len(docs[0].page_content)}")

# Split the fetched document into 1000-character chunks with a
# 200-character overlap, keeping each chunk's start index so the
# original position can be recovered later.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    add_start_index=True,
)
all_splits = text_splitter.split_documents(docs)

print(f"分割最大文档数量 {len(all_splits)} sub-documents.")

# Embed the chunks and index them in an in-memory vector store.
# SECURITY: never commit API keys to source control. The key is now read
# from the DASHSCOPE_API_KEY environment variable; the inline literal is
# kept only as a backward-compatible fallback and should be revoked/rotated.
embeddings = DashScopeEmbeddings(
    # model="multimodal-embedding-v1",
    model="text-embedding-v1",
    dashscope_api_key=os.environ.get(
        "DASHSCOPE_API_KEY", "sk-d16b46d66abb45bb960bd9c57804e2f9"
    ),
    # other params...
)
vector_store = InMemoryVectorStore(embeddings)
document_ids = vector_store.add_documents(documents=all_splits)

print(document_ids[:3])

# RAG answer prompt: answer strictly from the retrieved context, admit
# ignorance rather than fabricate, stay within three sentences, and always
# close with the fixed sign-off phrase.
_RAG_TEMPLATE = """
使用以下上下文来回答最后的问题。
如果你不知道答案，就说你不知道，不要试图编造答案。
最多使用三句话，并尽可能简洁地回答。
总是在答案的末尾说“谢谢你的提问！”。
{context}

问题: {question}

有用的答案:
    """
prompt = PromptTemplate.from_template(_RAG_TEMPLATE)


# prompt = ChatPromptTemplate.from_messages(
#     [
#         ("system", "You are a helpful assistant."),
#         ("user", "{question}"),
#     ]
# )
# Alternatively, load a ready-made prompt from the LangChain hub:
# prompt = hub.pull("rlm/rag-prompt")
#
# example_messages = prompt.invoke(
#     {"context": "(context goes here)", "question": "(question goes here)"}
# ).to_messages()

# assert len(example_messages) == 1
# print(example_messages[0].content)





# Build the chat LLM (Tongyi / Qwen). Model list:
# https://help.aliyun.com/zh/model-studio/getting-started/models
# SECURITY: never hard-code API keys in source. The key is now read from the
# DASHSCOPE_API_KEY environment variable; the inline literal is kept only as
# a backward-compatible fallback and should be revoked/rotated.
chatLLM = ChatTongyi(
    model="qwen-plus-2025-04-28",
    streaming=True,
    api_key=SecretStr(
        os.environ.get("DASHSCOPE_API_KEY", "sk-d16b46d66abb45bb960bd9c57804e2f9")
    ),
    # other params...
)


# class Search(TypedDict):
#     """Search query."""
#
#     query: Annotated[str, ..., "要运行的搜索查询。"]
#     section: Annotated[
#         Literal["beginning", "middle", "end"],
#         ...,
#         "Section to query.",
#     ]



# State shared between the graph's nodes.
class State(TypedDict):
    """Pipeline state: the user question, the retrieved context, and the answer."""

    # The user's input question.
    question: str
    # Documents returned by the vector-store similarity search.
    context: List[Document]
    # The LLM-generated answer text.
    answer: str


# def analyze_query(state: State):
#     structured_llm = chatLLM.with_structured_output(Search)
#     query = structured_llm.invoke(state["question"])
#     return {"query": query}

# Graph node: retrieve relevant chunks from the vector store.
def retrieve(state: State):
    """Look up the documents most similar to the question in the vector store."""
    question = state["question"]
    matches = vector_store.similarity_search(question)
    return {"context": matches}

# Graph node: let the chat LLM produce the answer from the retrieved context.
def generate(state: State):
    """Fill the RAG prompt with question + context and invoke the LLM."""
    chunks = [doc.page_content for doc in state["context"]]
    docs_content = "\n\n".join(chunks)
    filled_prompt = prompt.invoke(
        {"question": state["question"], "context": docs_content}
    )
    reply = chatLLM.invoke(filled_prompt)
    return {"answer": reply.content}

# Build the graph: START -> retrieve -> generate.
graph_builder = StateGraph(State).add_sequence([retrieve, generate])
graph_builder.add_edge(START, "retrieve")
graph = graph_builder.compile()

# Rendering the graph diagram relies on IPython display and (presumably) a
# remote Mermaid renderer — treat it as best-effort so a visualization
# failure does not abort the actual RAG run.
try:
    display(Image(graph.get_graph().draw_mermaid_png()))
except Exception as exc:
    print(f"Graph visualization skipped: {exc}")

# Invoke the graph once with a sample question and show the result.
result = graph.invoke({"question": "python的缺点有哪些？"})

print(f'Context: {result["context"]}\n\n')
print(f'Answer: {result["answer"]}')

# #流调用
# for step in graph.stream(
#     {"question": "说明一下python的历史"}, stream_mode="updates"
# ):
#     print(f"{step}\n\n----------------\n")
#
# #流token调用
# for message, metadata in graph.stream(
#         {"question": "说明一下python的历史"}, stream_mode="messages"
# ):
#     print(message.content, end="|")