from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage,HumanMessage

from langgraph.graph import StateGraph,START

from typing import TypedDict
from PIL import Image as PILImage

import io
import asyncio


api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
api_base="https://chatapi.littlewheat.com/v1"

llm = ChatOpenAI(model="gpt-4o",api_key=api_key,base_url=api_base,temperature=0)

# 定义父图中的状态
class ParentState(TypedDict):
    user_input:str # 用来接收用户的输入
    final_answer:str # 用来存储大模型针对用户输入的响应

def parent_node_1(state:ParentState):
    response = llm.invoke(state["user_input"])
    return {"final_answer":response}

# 定义子图中的状态
class SubgraphState(TypedDict):
    # 以下三个 key 都是 子图 (subgraph) 中独享的
    response_answer:str
    summary_answer:str
    score:str

# 定义第一个节点，用于接收父图中的响应并且做文本摘要
def subgraph_node_1(state:SubgraphState):
    system_prompt = """
        Please summary the content you receive to 50 words or less
        """
    messages = state["response_answer"] # 这里接收父图传递过来的响应
    messages = [SystemMessage(content=system_prompt)] + [HumanMessage(content=messages.content)]
    response = llm.invoke(messages)
    return {"summary_answer":response}

# 定义第二个节点：
def subgraph_node_2(state:SubgraphState):
    messages = f"""
        This is the full content of what you received：{state["response_answer"]} \n
        This information is summarized for the full content:{state["summary_answer"]} 
        Please rate the text and summary information, returning a scale of 1 to 10. Note: Only the score value needs to be returned.
        """
    response = llm.invoke([HumanMessage(content=messages)])

    # 发送共享状态密钥（'user_input'）的更新
    return {"score":response.content}

subgraph_builder = StateGraph(SubgraphState)
subgraph_builder.add_node("subgraph_node_1",subgraph_node_1)
subgraph_builder.add_node("subgraph_node_2",subgraph_node_2)
subgraph_builder.add_edge(START,"subgraph_node_1")
subgraph_builder.add_edge("subgraph_node_1","subgraph_node_2")
subgraph = subgraph_builder.compile()

def parent_node_2(state:ParentState):
    # 将父图中的状态转换为子图状态
    response = subgraph.invoke({"response_answer":state["final_answer"]})
    # 将子图状态再转换回父状态
    return {"final_answer":response}

builder = StateGraph(ParentState)
builder.add_node("node_1",parent_node_1)
builder.add_node("node_2",parent_node_2)
builder.add_edge(START,"node_1")
builder.add_edge("node_1","node_2")
graph = builder.compile()

def display(image_data):
    image = PILImage.open(io.BytesIO(image_data))
    image.show()

# display(graph.get_graph().draw_mermaid_png())

async def multiAsync():
    async for chunk in graph.astream({"user_input":"我现在想学习大模型，应该关注哪些技术？"},stream_mode="values"):
        print(chunk)
asyncio.run(multiAsync())