import random
import time
from typing import TypedDict, Optional, Union
from langchain.agents import create_agent
from langchain.tools import tool, ToolRuntime
from langgraph.graph import StateGraph, START, END
from langgraph.types import interrupt, Command, CachePolicy, RetryPolicy
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.cache.memory import InMemoryCache

from langgraph.checkpoint.memory import InMemorySaver as GraphMemorySaver
from langgraph.checkpoint.redis import RedisSaver
from langgraph.store.redis import RedisStore
from langchain.chat_models import init_chat_model
from IPython.display import Image, display
from model.Ark import ArkModel

@tool
def search_my_info(runtime: ToolRuntime):
    """Look up the user's favorite small animal."""
    # Only user 123 gets the dog answer; every other user gets the cat.
    return '小狗' if runtime.context['user_id'] == 123 else '小猫'

## Parallel node execution
def run_parallel_graph():
    """Run three model calls as parallel graph branches and join the results.

    START fans out to three generator nodes; ``summary`` has an incoming edge
    from each of them, so it acts as a join and runs exactly once after all
    three branches have completed.
    """
    model = create_agent(
        model=ArkModel().model,
        tools=[search_my_info]
    )

    class AgentState(TypedDict):
        input: str
        joke: str            # produced by generator_joke
        story: str           # produced by generator_story
        favorite: str        # produced by generator_favorite
        summary_output: str  # concatenation of the three branch results

    def generator_joke(state: AgentState):
        print('---> 执行：开始生成笑话')
        response = model.invoke({ "messages":[{ "role": "user", "content": "讲个关于程序员的笑话" }] })
        return { "joke": response['messages'][-1].content }

    def generator_story(state: AgentState):
        print('---> 执行：开始生成故事')
        response = model.invoke({ "messages":[{ "role": "user", "content": "讲个关于程序员的故事, 不超过100字" }] })
        return { "story": response['messages'][-1].content }

    def generator_favorite(state: AgentState):
        print('---> 执行：开始查询爱好')
        # context carries user_id so the search_my_info tool can branch on it
        response = model.invoke({ "messages":[{ "role": "user", "content": "我喜欢的小动物是什么？" }] }, context={ "user_id": 123 })
        return { "favorite": response['messages'][-1].content }

    def summary(state: AgentState):
        print('----> 开始汇总', state)
        # Default missing keys to '' so concatenation cannot raise a TypeError
        # (state.get('joke') would return None if a branch produced nothing).
        content = '\n\n'.join([
            state.get('joke', ''),
            state.get('story', ''),
            state.get('favorite', ''),
        ])
        return {'summary_output': content}

    graph_builder = StateGraph(AgentState)

    graph_builder.add_node('generator_joke', generator_joke)
    graph_builder.add_node('generator_story', generator_story)
    graph_builder.add_node('generator_favorite', generator_favorite)
    graph_builder.add_node('summary', summary)

    ## Multiple edges out of START make the three generator nodes run in parallel.
    graph_builder.add_edge(START, 'generator_joke')
    graph_builder.add_edge(START, 'generator_story')
    graph_builder.add_edge(START, 'generator_favorite')

    ## Because all three nodes point at 'summary', it runs only once, after every
    ## incoming branch has finished — not once per branch.
    graph_builder.add_edge('generator_joke', 'summary')
    graph_builder.add_edge('generator_story', 'summary')
    graph_builder.add_edge('generator_favorite', 'summary')
    graph_builder.add_edge('summary', END)

    graph_compiler = graph_builder.compile()

    response = graph_compiler.invoke({ 'input': '12312' })
    print(response['summary_output'])

## A more flexible example: nodes update state and choose which node to jump to
def run_flexible_graph():
    """Demonstrate dynamic routing: nodes return ``Command(update=..., goto=...)``
    to both patch the graph state and pick the next node at runtime."""
    ## Reference tables the model answers questions from (via the system prompt).
    data = {
        'people': """
            姓名  是否中奖  座位号
            张三  是  A01
            李四  否  A02
        """,
        "favorite": """
            姓名  喜欢的动物  喜欢的食物
            张三  小猫  白菜
            李四  小狗  萝卜
        """
    }

    model = create_agent(
        model=ArkModel().model,
        # Bug fix: the prompt previously embedded a literal f'...' wrapper around
        # the favorite table (the inner "f'" was plain text, not an f-string).
        system_prompt=f"用户的信息为：{data['people']}\n\n\n用户的爱好为：{data['favorite']}"
    )

    class AgentState(TypedDict):
        input: str          # raw user input (an id like '1000')
        people_info: str    # lottery / seat lookup results
        favorite_info: str  # favorite-animal lookup result
        username: str       # resolved user name

    def search_favorite_info(state: AgentState):
        username = state.get('username')
        # Bug fix: message key was "role'" (stray apostrophe), producing an
        # invalid chat message.
        response = model.invoke({"messages": [{ "role": 'user', 'content': f'查询{username}的喜欢的小动物是什么?' }]})
        return {'favorite_info': response['messages'][-1].content}

    def search_people_info(state: AgentState):
        username = state.get('username')
        response = model.invoke({"messages": [{ "role": 'user', 'content': f'查询{username}是不是中奖了?' }]})
        # goto routes to the seat lookup instead of following the static edge to END.
        return Command(update={ 'people_info': response['messages'][-1].content }, goto='search_people_add')

    def search_people_add(state: AgentState):
        username = state.get('username')
        response = model.invoke({"messages": [{"role": 'user', 'content': f'查询{username}的座位号是多少?'}]})
        content = response['messages'][-1].content
        return { 'people_info': state.get('people_info') + '\n' + content }

    def search_people(state: AgentState):
        ## Command lets a node both update the state and jump to a chosen node.
        user_info = { '1000': '张三', '1001': '李四' }
        return Command(
            update={
                "username": user_info[state['input']]
            },
            goto='search_people_info'
        )

    with_builder = StateGraph(AgentState)
    with_builder.add_node('search_people', search_people)
    with_builder.add_node('search_people_info', search_people_info)
    with_builder.add_node('search_favorite_info', search_favorite_info)
    with_builder.add_node('search_people_add', search_people_add)

    with_builder.add_edge(START, 'search_people')
    with_builder.add_edge('search_people_info', END)
    with_builder.add_edge('search_favorite_info', END)
    with_builder.add_edge('search_people_add', END)

    with_graph = with_builder.compile()
    response = with_graph.invoke({'input': '1000'})
    print(response)

## Human-in-the-loop: when required information is missing, ask the user for it
def user_fixable():
    """Human-in-the-loop demo: when a lookup fails, ``interrupt()`` pauses the
    graph and asks the user for the missing name before resuming."""
    data = {
        'people': """
            姓名  是否中奖  座位号
            张三  是  A01
            李四  否  A02
        """,
        "favorite": """
            姓名  喜欢的动物  喜欢的食物
            张三  小猫  白菜
            李四  小狗  萝卜
        """
    }

    model = create_agent(
        model=ArkModel().model,
        # Bug fix: removed the stray literal f'...' quotes that were embedded
        # around the favorite table in the prompt text.
        system_prompt=f"用户的信息为：{data['people']}\n\n\n用户的爱好为：{data['favorite']}"
    )

    def has_user(user: str) -> Union[str, None]:
        """Return *user* if it appears in the favorite table, else None.

        NOTE(review): this is a plain substring check against the raw table
        text, so any substring of the table (e.g. a column header) matches.
        """
        return user if user in data['favorite'] else None

    class AgentState(TypedDict):
        input: str          # name (or id) typed by the user
        username: str
        search_result: str

    def search_user_info(state: AgentState):

        ## interrupt() suspends the graph here and surfaces this payload;
        ## user_input receives the value later passed via Command(resume=...).
        user_input = interrupt({
            "message": f"暂无查询到 {state.get('input')}的信息， 请直接输入用户的名称：",
        })

        ## Nothing below runs until the graph is resumed; execution restarts here.
        return Command(update={ 'input': user_input }, goto='search_user_result')

    def search_user_result(state: AgentState):
        input_name = state.get('input')
        username = has_user(input_name)

        if not username:
            # Unknown user: jump to the interrupt node to ask for a valid name.
            return Command(update={}, goto='search_user_info')

        response = model.invoke({ "messages": [{ 'role': 'user', 'content': f'{username} 喜欢吃什么？' }] })
        return { 'search_result': response['messages'][-1].content }

    with_builder = StateGraph(AgentState)
    with_builder.add_node('search_user_result', search_user_result)
    with_builder.add_node('search_user_info', search_user_info)

    with_builder.add_edge(START, 'search_user_result')
    with_builder.add_edge('search_user_result', END)

    ## A checkpointer is required so the interrupted graph can be resumed.
    graph = with_builder.compile(checkpointer=InMemorySaver())

    config = {"configurable": {"thread_id": "1"}}
    search_query_user = input('请输入用户的id或名称查询用户的爱好：')
    result = graph.invoke({ 'input': search_query_user }, config)

    ## While interrupted, the result carries '__interrupt__' with the payload.
    ## Resuming via Command(resume=...) makes that value interrupt()'s return.
    while "__interrupt__" in result:
        stop_content = result.get('__interrupt__')[0].value
        name = input(stop_content.get('message'))
        result = graph.invoke(Command(resume=name), config)

    print(result)

## Node caching
def cache_node():
    """Node-level caching demo: the random-number node is cached, so repeated
    invocations reuse the first result instead of re-running the node."""

    class AgentState(TypedDict):
        input: str
        cache_node_des: str
        result: str

    def make_random(state: AgentState):
        # Without the cache policy this value would change on every run.
        return { "cache_node_des": str(random.random()) }

    def render_result(state: AgentState):
        return { 'result': '本次生成的随机数为:' + state.get('cache_node_des') }

    with_builder = StateGraph(AgentState)

    ## cache_policy enables caching for this node. When combined with an
    ## InMemorySaver checkpointer, key_func must return a fixed string so the
    ## cache key stays stable across invocations.
    with_builder.add_node('cache_node', make_random, cache_policy=CachePolicy(ttl=120, key_func=lambda _: '123'))
    with_builder.add_node('count_node', render_result)
    with_builder.add_edge(START, 'cache_node')
    with_builder.add_edge('cache_node', 'count_node')
    with_builder.add_edge('count_node', END)

    ## Passing cache= here is what switches on graph-level cache support.
    graph = with_builder.compile(cache=InMemoryCache(), checkpointer=InMemorySaver())
    config = {"configurable": {"thread_id": "1"}}
    while True:
        input('开始执行:\n')
        response = graph.invoke({ 'input': '123123' }, config)
        print(response)

## Deferred node
def run_node():
    """Deferred-node demo: node5 is registered with defer=True so it waits for
    all active parallel branches to finish before it executes."""

    class AgentState(TypedDict):
        node_1: str
        node_2: str
        node_3: str
        node_4: str
        node_5: str
        node_6: str

    def make_writer(key: str, value: str):
        """Build a trivial node that writes a single state key."""
        def writer(state: AgentState):
            return { key: value }
        return writer

    def node5(state: AgentState):
        print('---> node5 exec', state)
        return { 'node_5': 'node5' }

    with_builder = StateGraph(AgentState)
    for idx in (1, 2, 3, 4):
        with_builder.add_node(f'node{idx}', make_writer(f'node_{idx}', f'node{idx}'))

    ## Caveat: when two parallel branches converge on the same node, the join
    ## already waits for both. defer=True matters when the branches have
    ## different lengths — without it node5 would fire once per branch.
    with_builder.add_node('node5', node5, defer=True)
    with_builder.add_node('node6', make_writer('node_6', 'node6'))

    with_builder.add_edge(START, 'node1')
    with_builder.add_edge(START, 'node2')
    with_builder.add_edge('node1', 'node3')
    with_builder.add_edge('node2', 'node4')
    with_builder.add_edge('node4', 'node6')
    with_builder.add_edge('node3', 'node5')
    with_builder.add_edge('node6', 'node5')
    with_builder.add_edge('node5', END)

    graph = with_builder.compile()

    ## Render the graph topology to ./graph.png, then run it once.
    display(Image(graph.get_graph().draw_mermaid_png(output_file_path='./graph.png')))
    graph.invoke({})

## Redis-backed storage
def redis_node():
    """Checkpointer demo: an in-memory saver keeps per-thread conversation state.

    The commented-out variant below swaps in Redis-backed checkpointing and a
    Redis store (requires a running Redis instance).
    """
    class AgentState(TypedDict):
        query: str    # the user's question
        content: str  # the model's answer

    model = create_agent(
        model=ArkModel().model,
    )
    checkpointer = GraphMemorySaver()

    def call_node(state: AgentState):
        res = model.invoke({"messages": [{"role": 'user', "content": state.get('query')}]})
        return {"content": res['messages'][-1].content}

    with_builder = StateGraph(AgentState)
    with_builder.add_node('call_node', call_node)
    with_builder.add_edge(START, "call_node")
    with_builder.add_edge("call_node", END)

    graph = with_builder.compile(checkpointer=checkpointer)

    while True:
        question = input("请输入你的问题：")
        # Bug fix: AgentState has no 'messages' key — the old payload
        # {"messages": [...]} left state['query'] unset, so call_node always
        # sent content=None to the model. Pass the question as 'query'.
        response = graph.invoke(
            {'query': question},
            {"configurable": {"thread_id": "1"}},
        )
        print(response)

    # DB_URI = "redis://localhost:6379"
    # with (
    #     RedisStore.from_conn_string(DB_URI) as store,
    #     RedisSaver.from_conn_string(DB_URI) as checkpointer
    # ):
    #     store.setup()
    #     checkpointer.setup()
    #
    #     model = create_agent(
    #         model=ArkModel().model,
    #         checkpointer=checkpointer,
    #         store=store
    #     )
    #
    #     def call_node(state: AgentState):
    #         res = model.invoke({ "messages": [{ "role": 'user', "content": state.get('query')}] })
    #         return { "content": res['messages'][-1].content }
    #
    #     with_builder = StateGraph(AgentState)
    #     with_builder.add_node('call_node', call_node)
    #     with_builder.add_edge(START, "call_node")
    #     with_builder.add_edge("call_node", END)
    #
    #     graph = with_builder.compile(checkpointer=checkpointer, store=store)
    #
    #     while True:
    #         question = input("请输入你的问题：")
    #         response = graph.invoke({ 'query': question }, {
    #             "configurable": {
    #                 "thread_id": "1",
    #             }
    #         })
    #         print(response)

def run():
    """Module entry point: launch the checkpointer / Redis demo."""
    redis_node()

