from langgraph.store.memory import InMemoryStore
from langgraph.store.memory import InMemoryStore

from langchain.embeddings import init_embeddings
from langchain_community.embeddings import ZhipuAIEmbeddings

from langchain_community.embeddings import ZhipuAIEmbeddings


from langgraph.checkpoint.memory import InMemorySaver


from typing_extensions import TypedDict

from langgraph.config import get_store
from langchain_core.runnables import RunnableConfig
from langgraph.prebuilt import create_react_agent
from langgraph.store.memory import InMemoryStore

from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import HumanMessage
import time
import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from chainlit.types import ThreadDict
from openai import AsyncOpenAI
from mcp import ClientSession
from typing import Dict, Optional
from fastapi import Request, Response
from chainlit.input_widget import Select, Switch, Slider
import pandas as pd
import plotly.graph_objects as go
import json
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import InMemorySaver



from langchain.embeddings import init_embeddings
from langgraph.store.memory import InMemoryStore

# NOTE(review): orphaned comment -- no store is created here. The semantic-search
# store examples live in the commented-out block at the bottom of this file.

from langchain_anthropic import ChatAnthropic
from langmem.short_term import SummarizationNode, RunningSummary
from langchain_core.messages.utils import count_tokens_approximately
from langgraph.prebuilt import create_react_agent
from langgraph.prebuilt.chat_agent_executor import AgentState
from langgraph.checkpoint.memory import InMemorySaver
from typing import Any
from typing import Any, TypedDict

from langchain.chat_models import init_chat_model
from langchain_core.messages import AnyMessage
from langchain_core.messages.utils import count_tokens_approximately
from langgraph.graph import StateGraph, START, MessagesState
from langgraph.checkpoint.memory import InMemorySaver
from langmem.short_term import SummarizationNode, RunningSummary

import os

# Chat model shared by the summarization node and the main call node.
# qwen-max is one option; see the Model Studio model list for alternatives:
# https://help.aliyun.com/zh/model-studio/getting-started/models
model = ChatTongyi(
    model="qwen-max",
    streaming=True,
    # SECURITY: an API key was previously hard-coded on this line and is now
    # leaked in version control -- rotate that credential. Supply the new key
    # via the DASHSCOPE_API_KEY environment variable; failing fast here (KeyError)
    # is preferable to a confusing auth error later.
    api_key=os.environ["DASHSCOPE_API_KEY"],
)

# NOTE(review): this node is never used -- the name `summarization_node` is
# rebound by the second SummarizationNode(...) assignment further down before
# any graph references it. Consider deleting one of the two definitions.
summarization_node = SummarizationNode( 
    token_counter=count_tokens_approximately,
    model=model,
    max_tokens=384,
    max_summary_tokens=128,
    output_messages_key="llm_input_messages",
)

class State(AgentState):
    """Agent state extended with a running-summary context.

    NOTE(review): this class is shadowed by the second ``State`` definition
    (based on ``MessagesState``) below; the graph compiled in this file uses
    that later one, so this definition is effectively dead.
    """

    # NOTE: we're adding this key to keep track of previous summary information
    # to make sure we're not summarizing on every LLM call
    context: dict[str, RunningSummary]  


checkpointer = InMemorySaver() 



class State(MessagesState):
    """Graph state: message history plus per-summary running context.

    Shadows the earlier ``State(AgentState)`` definition above; this is the
    schema actually passed to ``StateGraph`` below.
    """

    # Maps a summary key to its RunningSummary so the summarization node can
    # avoid re-summarizing the whole history on every LLM call.
    context: dict[str, RunningSummary]  

class LLMInputState(TypedDict):  
    """Input schema for ``call_model``: only the keys that node reads."""

    # Messages produced by the summarization node (summary + recent turns).
    summarized_messages: list[AnyMessage]
    context: dict[str, RunningSummary]

# Rebinds `summarization_node` (replacing the unused definition above).
# Parameter meanings per langmem's SummarizationNode -- confirm against the
# installed langmem version:
summarization_node = SummarizationNode(
    token_counter=count_tokens_approximately,  # cheap approximate counter; no tokenizer dependency
    model=model,
    max_tokens=256,  # presumably the token budget for messages handed to the LLM
    max_tokens_before_summary=256,  # presumably triggers summarization once history exceeds this
    max_summary_tokens=128,  # cap on the generated summary's size
)

def call_model(state: LLMInputState):
    """Run the chat model on the pre-summarized conversation.

    Reads only ``summarized_messages`` (produced by the summarization node)
    and appends the model's reply to the graph's message history.
    """
    summarized = state["summarized_messages"]
    reply = model.invoke(summarized)
    return {"messages": [reply]}

# Rebinds `checkpointer` (replacing the unused instance above). In-memory,
# per-process only -- conversation state is lost on restart.
checkpointer = InMemorySaver()
builder = StateGraph(State)
# Node name defaults to the function name, "call_model".
builder.add_node(call_model)
builder.add_node("summarize", summarization_node)
# Every turn first runs summarization, then the model call.
builder.add_edge(START, "summarize")
builder.add_edge("summarize", "call_model")
graph = builder.compile(checkpointer=checkpointer)

# Invoke the graph.
# All four calls share thread_id "1", so the checkpointer accumulates a single
# conversation and the summarizer sees the growing history.
config = {"configurable": {"thread_id": "1"}}
# NOTE(review): "messages" is passed as a bare string here; this relies on
# LangGraph coercing it to a single human message -- confirm against the
# installed langgraph version.
graph.invoke({"messages": "hi, my name is bob"}, config)
graph.invoke({"messages": "write a short poem about cats"}, config)
graph.invoke({"messages": "now do the same but for dogs"}, config)
# If summarization preserved the name, the model should answer "bob".
final_response = graph.invoke({"messages": "what's my name?"}, config)

print(final_response)






# NOTE(review): everything inside the triple-quoted string below is dead,
# commented-out example code (store/agent/semantic-search demos). It contains
# hard-coded API keys that are now leaked in version control -- rotate those
# credentials, and consider deleting this block or moving it to documentation.
'''
model = ChatTongyi(
    model="qwen-max",   # 此处以qwen-max为例，您可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
    streaming=True,
     api_key='sk-13c8bc2d23274db682f193b16ce57b64'
)

store = InMemoryStore() 

class UserInfo(TypedDict): 
    name: str
    age:int

def save_user_info(user_info: UserInfo, config: RunnableConfig) -> str: 
    """Save user info."""
    # Same as that provided to `create_react_agent`
    store = get_store() 
    
    user_id = config["configurable"].get("user_id")
    store.put(("users",), user_id, user_info) 
    return "Successfully saved user info."
def delete_user_info(user_info: UserInfo, config: RunnableConfig) -> str: 
    """delete all user info."""
    # Same as that provided to `create_react_agent`
    store = get_store() 
    
    user_id = config["configurable"].get("user_id")
    store.delete(("users",), user_id) 
    print("ssssssssss")
    return "Successfully saved user info."

agent = create_react_agent(
    model=model,
    tools=[save_user_info,delete_user_info],
    store=store
)

# Run the agent
agent.invoke(
    {"messages": [{"role": "user", "content": "My name is John Smith. and my age is 16"}]},
    config={"configurable": {"user_id": "user_123"}} 
)
output=store.get(("users",), "user_123").value

print("11111111:"+str(output))


agent.invoke(
    {"messages": [{"role": "user", "content": "delete all user info"}]},
    config={"configurable": {"user_id": "user_123"}} 
)
output=store.get(("users",), "user_123").value

print("222:"+str(output))



embeddings = ZhipuAIEmbeddings(
    model="embedding-2",
    api_key="f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5"
    # With the `embedding-3` class
    # of models, you can specify the size
    # of the embeddings you want returned.
    # dimensions=1024
)

store = InMemoryStore(
    index={
        "embed": embeddings,
        "dims": 1536,
    }
)

store.put(("user_123", "memories"), "1", {"text": "I love pizza"})
store.put(("user_123", "memories"), "2", {"text": "I am a plumber"})

items = store.search(
    ("user_123", "memories"), query="I'm hungry",limit=1)

print(items)





model = ChatTongyi(
    model="qwen-max",   # 此处以qwen-max为例，您可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
    streaming=True,
     api_key='sk-13c8bc2d23274db682f193b16ce57b64'
)

store = InMemoryStore() 

class UserInfo(TypedDict): 
    name: str
    age:int

def save_user_info(user_info: UserInfo, config: RunnableConfig) -> str: 
    """Save user info."""
    # Same as that provided to `create_react_agent`
    store = get_store() 
    
    user_id = config["configurable"].get("user_id")
    store.put(("users",), user_id, user_info) 
    return "Successfully saved user info."

agent = create_react_agent(
    model=model,
    tools=[save_user_info],
    store=store
)

# Run the agent
agent.invoke(
    {"messages": [{"role": "user", "content": "My name is John Smith. and my age is 16"}]},
    config={"configurable": {"user_id": "user_123"}} 
)

 

# You can access the store directly to get the value
output=store.get(("users",), "user_123").value

print(output)




embeddings = ZhipuAIEmbeddings(
    model="embedding-2",
    api_key="f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5"
    # With the `embedding-3` class
    # of models, you can specify the size
    # of the embeddings you want returned.
    # dimensions=1024
)

 

store = InMemoryStore(
    index={
        "embed": embeddings,  # Embedding provider
        "dims": 1536,                              # Embedding dimensions
        "fields": ["name"]              # Fields to embed
    }
)


user_id = "my-user"
application_context = "chitchat"
namespace = (user_id, application_context)
store.put(
    namespace,
    "a-memory",
    {
        "rules": [
            "User likes short, direct language",
            "User only speaks English & python",
        ],
        "my-key": "my-value",
    },
)
# get the "memory" by ID
item = store.get(namespace, "a-memory")
# search for "memories" within this namespace, filtering on content equivalence, sorted by vector similarity
items = store.search(
    namespace, filter={"my-key": "my-value"}, query="language preferences"
)
print(items)
'''