import os
from typing import Annotated, TypedDict

from IPython.core.display import Image
from IPython.core.display_functions import display
from langchain_community.chat_models import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.tools import QuerySQLDatabaseTool
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.vectorstores import InMemoryVectorStore
from langgraph.checkpoint.memory import MemorySaver
from langgraph.constants import START
from langgraph.graph import StateGraph
from pydantic import SecretStr
from rich import print as rprint
from rich.console import Console
from rich.markdown import Markdown

# Build the chat LLM client (Tongyi / DashScope).
# SECURITY FIX: the API key was committed to source. It is now read from the
# DASHSCOPE_API_KEY environment variable; the old literal remains only as a
# backward-compatible fallback and should be rotated and removed.
chatLLM = ChatTongyi(
    # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
    model="qwen-plus-2025-04-28",
    streaming=True,
    api_key=SecretStr(
        os.environ.get("DASHSCOPE_API_KEY", "sk-d16b46d66abb45bb960bd9c57804e2f9")
    ),
)

# Embedding model + in-memory vector store for document embedding.
# SECURITY FIX: same committed secret as the chat client — read it from the
# DASHSCOPE_API_KEY environment variable; the literal fallback exists only
# for backward compatibility and should be rotated.
embeddings = DashScopeEmbeddings(
    # model="multimodal-embedding-v1",
    model="text-embedding-v1",
    dashscope_api_key=os.environ.get(
        "DASHSCOPE_API_KEY", "sk-d16b46d66abb45bb960bd9c57804e2f9"
    ),
)
vector_store = InMemoryVectorStore(embeddings)


from langchain_community.utilities import SQLDatabase

# Connect to the local Chinook sample database and print a few facts so the
# operator can confirm the connection works before the graph runs.
db = SQLDatabase.from_uri("sqlite:///./Chinook.db")
print(db.dialect)
print(db.get_usable_table_names())

# Smoke-test query: fetch a handful of rows from the Artist table.
sample_sql = "SELECT * FROM Artist LIMIT 10;"
sample_rows = db.run(sample_sql)
print(sample_rows)


class State(TypedDict):
    """Shared state flowing through the LangGraph pipeline nodes."""

    question: str  # the user's natural-language question
    query: str     # SQL produced by write_query
    result: str    # raw DB output produced by execute_query
    answer: str    # final natural-language answer produced by generate_answer

# System prompt sent to the LLM at runtime — kept verbatim (Chinese text is
# part of program behavior, not a comment).
system_message = """
给定一个输入问题，创建一个语法正确的{dialect}查询
跑去帮助找到答案。除非用户在问题中指定
他们希望获得的具体示例数量，始终将您的查询限制在
最多{top_k}个结果。您可以按相关列对结果进行排序
返回数据库中最有趣的示例。

从不查询特定表中的所有列，只要求
很少有相关专栏给出这个问题。

注意只使用模式中可见的列名
描述。注意不要查询不存在的列。也，
注意哪列在哪张表中。

仅使用下表：
{table_info}
"""

user_prompt = "问题: {input}"

# Two-message prompt: system rules followed by the user's question.
query_prompt_template = ChatPromptTemplate.from_messages(
    [("system", system_message), ("user", user_prompt)]
)

# Echo the assembled template so its structure is visible when the script runs.
for msg in query_prompt_template.messages:
    msg.pretty_print()


class QueryOutput(TypedDict):
    """Generated SQL query.

    Structured-output schema handed to the LLM in write_query. The Annotated
    description string below is surfaced to the model at runtime, so it is
    kept verbatim.
    """

    query: Annotated[str, ..., "语法上有效的SQL查询。"]


def write_query(state: State):
    """Generate the SQL query that should answer the user's question."""
    # Render the prompt with the live DB dialect and schema.
    rendered_prompt = query_prompt_template.invoke(
        {
            "dialect": db.dialect,
            "top_k": 10,
            "table_info": db.get_table_info(),
            "input": state["question"],
        }
    )
    # Constrain the model's reply to the QueryOutput schema.
    structured_reply = chatLLM.with_structured_output(QueryOutput).invoke(rendered_prompt)
    return {"query": structured_reply["query"]}

# result = write_query({"question":"一共有多少员工？"})
# print(result)


def execute_query(state: State):
    """Run the generated SQL against the database and capture its raw output."""
    sql_tool = QuerySQLDatabaseTool(db=db)
    raw_output = sql_tool.invoke(state["query"])
    return {"result": raw_output}

# result = execute_query({"query": "SELECT COUNT(EmployeeId) AS EmployeeCount FROM Employee;"})
# print("测试执行sql:",result)


def generate_answer(state: State):
    """Answer the user's question using the SQL query and its execution result."""
    # Assemble the exact same prompt text as before, line by line.
    prompt_lines = [
        "给定以下用户问题，获取相应的SQL查询，和SQL执行结果，回答用户问题。\n",
        f'问题: {state["question"]}',
        f'查询sql: {state["query"]}',
        f'sql执行结果: {state["result"]}',
    ]
    reply = chatLLM.invoke("\n".join(prompt_lines))
    return {"answer": reply.content}

# Wire the three nodes into a linear pipeline: write -> execute -> answer.
graph_builder = StateGraph(State)
graph_builder.add_sequence([write_query, execute_query, generate_answer])
graph_builder.add_edge(START, "write_query")

# Checkpointing lets the run pause; interrupt_before halts the graph right
# before execute_query so a human can approve the generated SQL.
memory = MemorySaver()
graph = graph_builder.compile(
    checkpointer=memory,
    interrupt_before=["execute_query"],
)

# Render the compiled graph as a Mermaid diagram and save it locally.
with open("graph.png", "wb") as png_file:
    png_file.write(graph.get_graph().draw_mermaid_png())

# mermaid_code = graph.get_graph().draw_mermaid()
# rprint(Markdown(f"```mermaid\n{mermaid_code}\n```"))

# for step in graph.stream(
#     {"question": "一共有多少员工?"}, stream_mode="updates"
# ):
#     print(step)


# Run the graph; it pauses at the interrupt_before checkpoint (before
# execute_query) so the operator can review the generated SQL.
config = {"configurable": {"thread_id": "1"}}
for step in graph.stream(
    {"question": "一共有多少员工?"},
    config,
    stream_mode="updates",
):
    print(step)

print("=====================")

# Ask the operator to approve executing the generated SQL.
try:
    user_approval = input("您想执行查询吗？(yes/no): ")
except Exception:
    # Non-interactive environment (e.g. closed/piped stdin): default to "no".
    user_approval = "no"

# FIX: strip surrounding whitespace so inputs like " yes " are accepted.
if user_approval.strip().lower() == "yes":
    # Resume from the checkpoint: stream(None, config) continues the paused
    # run on the same thread_id, executing the approved query.
    for step in graph.stream(None, config, stream_mode="updates"):
        print(step)
else:
    print("Operation cancelled by user.")