import json
import operator
import os
from typing import Annotated, Optional, TypedDict, Union

import requests
from langchain_core.messages import AnyMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode
from pydantic import BaseModel, Field
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker


# Argument schema for the fetch_real_time_info tool.
# (Field description is sent to the LLM as the parameter spec — keep it stable.)
class SearchQuery(BaseModel):
    query:str=Field(description="用于联网查询的问题")

@tool(args_schema=SearchQuery)
def fetch_real_time_info(query):
    """Get real-time Internet information"""
    url = "https://google.serper.dev/search"
    payload = json.dumps({
        "q": query,
        "num": 1,          # only the top organic result is needed
    })
    # SECURITY: the Serper API key used to be hard-coded here and has leaked
    # into source control — rotate it. Prefer the SERPER_API_KEY environment
    # variable; the old literal remains only as a backward-compatible fallback.
    serper_key = os.getenv("SERPER_API_KEY", "22a84d67009121271e4a5eb21d809e11d3bc8d45")
    headers = {
        'X-API-KEY': serper_key,
        'Content-Type': 'application/json'
    }

    # timeout keeps a stalled HTTP call from hanging the whole agent graph
    response = requests.post(url, headers=headers, data=payload, timeout=10)
    data = response.json()  # parse the JSON body (respects response encoding)
    if 'organic' in data:
        # Return the 'organic' results as a JSON string (keep non-ASCII text readable)
        return json.dumps(data['organic'], ensure_ascii=False)
    else:
        # No 'organic' key in the response: report a structured error string
        return json.dumps({"error": "No organic results found"}, ensure_ascii=False)


# Argument schema for the get_weather tool.
class WeatherLoca(BaseModel):
    location:str=Field(description="城市的名称")

@tool(args_schema=WeatherLoca)
def get_weather(location):
    """Call to get the current weather."""
    # Canned responses for the two demo cities; anything else gets the
    # "not found" message. (.lower() is a no-op for Chinese city names but
    # is kept for parity with the original lookup.)
    canned = {
        "北京": "北京的温度是16度，天气晴朗。",
        "重庆": "重庆的温度是32度，天气晴朗。",
    }
    return canned.get(location.lower(), "不好意思，并未查询到具体的天气信息。")

# Argument schema for the insert_db tool; mirrors the UserInfo ORM columns.
class User(BaseModel):
    name:str=Field(description="用户名称")
    age:Optional[int]=Field(description="用户年龄")
    email:str=Field(description="用户邮箱")
    phone:Optional[str]=Field(description="用户手机号")

# Declarative base shared by the ORM models below.
Base = declarative_base()

class UserInfo(Base):
    """ORM mapping for the `users` table written to by the insert_db tool."""
    __tablename__ = "users"
    id = Column(Integer,primary_key=True)
    name = Column(String(50))
    age = Column(Integer)
    email = Column(String(50))
    phone = Column(String(20))

    # Reference for the DATABASE_URI fields below (replace with your own MySQL settings):
    #   user      - MySQL user name (e.g. root)
    #   password  - MySQL password. NOTE(review): the original comment said
    #               123456 but DATABASE_URI uses a different value — verify.
    #   localhost - IP/host of the MySQL server
    #   langgraph - name of the database to connect to
    #   charset=utf8mb4 - character set supporting the full Unicode range


# SECURITY NOTE(review): database credentials are hard-coded in source —
# load them from the environment or a config file instead.
DATABASE_URI = "mysql+pymysql://root:1qaz2wsx@localhost:3306/langgraph?charset=utf8mb4"

# echo=True logs every emitted SQL statement — noisy; disable in production.
engine = create_engine(DATABASE_URI, echo=True)
# Session factory bound to the engine; call Session() per operation.
Session = sessionmaker(bind=engine)

@tool(args_schema=User)
def insert_db(name, age, email, phone):
    """Insert user information into the database, The required parameters are name, age, email, phone"""
    # A fresh session per call keeps tool invocations isolated from each other.
    session = Session()
    try:
        record = UserInfo(name=name, age=age, email=email, phone=phone)
        session.add(record)
        session.commit()
    except Exception as exc:
        # Undo the partial transaction and surface the failure to the agent.
        session.rollback()
        return {"messages": [f"数据存储失败，错误原因:{exc}"]}
    else:
        return {"messages": ["数据已经成功存储到数据库"]}
    finally:
        session.close()

# The three callable tools exposed to the model, and the node that executes them.
tools = [fetch_real_time_info,get_weather,insert_db]
tool_node = ToolNode(tools)

# SECURITY NOTE(review): OpenAI-compatible API key and proxy base URL are
# hard-coded — move to environment variables and rotate this key.
api_key = "sk-6S0PtpNia71gjcfwSsDPsJ9mGqsVPr2XRQzAx1dHbJS7RW4t"
api_base="https://chatapi.littlewheat.com/v1"

# temperature=0 for more deterministic routing / structured output.
llm = ChatOpenAI(model="gpt-4o",api_key = api_key,base_url=api_base,temperature=0)
model_with_tools = llm.bind_tools(tools)
print(model_with_tools)

# Model for a plain conversational reply when no tool is needed.
# (Class docstring and field description are sent to the LLM — keep them stable.)
class ConversationalResponse(BaseModel):
    """Respond to the user's query in a conversational manner. Be kind and helpful."""
    response:str=Field(description="A conversational response to the user's query")

# Union wrapper filled in by with_structured_output: either a direct reply
# (ConversationalResponse) or the argument set for one of the tools.
class FinalResponse(BaseModel):
    final_output:Union[ConversationalResponse,User,SearchQuery,WeatherLoca]

def chat_with_model(state):
    """Ask the LLM for a structured FinalResponse based on the conversation so far."""
    print(f"chat_with_model:{state}")
    structured_llm = llm.with_structured_output(FinalResponse)
    structured_reply = structured_llm.invoke(state["messages"])
    return {"messages": [structured_reply]}

def final_answer(state):
    """Unwrap the conversational text from the last structured message."""
    print(f"final_answer:{state}")
    latest = state["messages"][-1]
    return {"messages": [latest.final_output.response]}

def execute_function(state):
    """Turn the structured output into a tool call, run it, and return the tool's reply."""
    print(f"execute_function:{state}")
    structured = state["messages"][-1].final_output
    # Re-invoke the tool-bound model so it emits a proper tool-call message.
    tool_call_msg = model_with_tools.invoke(str(structured))
    print(tool_call_msg)
    resp = tool_node.invoke({"messages": [tool_call_msg]})
    print(f"resp:{resp}")
    return {"messages": [resp["messages"][0].content]}

# Shared graph state: node return values are merged into `messages`
# via operator.add (list concatenation).
class AgentState(TypedDict):
    messages:Annotated[list[AnyMessage],operator.add]

def generate_branch(state:AgentState):
    """Routing predicate: True -> execute_function branch, False -> final_answer branch."""
    latest = state["messages"][-1]
    # A plain conversational reply needs no tool execution.
    return not isinstance(latest.final_output, ConversationalResponse)

builder = StateGraph(AgentState)

# Register the three nodes.
builder.add_node("chat_with_model",chat_with_model)
builder.add_node("final_answer",final_answer)
builder.add_node("execute_function",execute_function)

# The graph starts by asking the model for a structured response.
builder.set_entry_point("chat_with_model")

# Conditional edge: True -> run a tool, False -> answer directly.
builder.add_conditional_edges("chat_with_model",generate_branch,{True:"execute_function",False:"final_answer"})

# Either branch ends the run.
builder.set_finish_point("final_answer")
builder.set_finish_point("execute_function")

# Compile into a runnable graph.
graph = builder.compile()

# query="请介绍一下你自己"
query = "我叫王杏杏1，今年40岁，电话号码是175855245，邮箱是25458@qq.com"
input_message = {"messages":[query]}
result = graph.invoke(input_message)
print(f"result:{result}")


