from langchain.agents import create_agent
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
import os
from dataclasses import dataclass
from langchain.tools import tool, ToolRuntime
from langgraph.checkpoint.memory import InMemorySaver
import streamlit as st
from baidusearch.baidusearch import search
import logging

# Configure logging: timestamped, INFO-level records for this module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# In-memory checkpointer so the agent keeps per-thread conversation history
# (state is lost when the process restarts).
checkpointer = InMemorySaver()

# System prompt (Chinese, runtime string — left untranslated on purpose).
# It instructs the assistant to surface search-result titles, abstracts and
# URLs from the search_internet tool instead of omitting them.
SYSTEM_PROMPT = """
你是一个智能助手，能够根据用户的问题和上下文提供有用的信息。
重要指导：
1. 当使用search_internet工具获取信息后，请将搜索结果的关键信息直接整合到你的回答中
2. 请确保搜索结果中的标题、摘要和URL等重要内容能够被用户看到
3. 不要省略搜索结果中的重要信息
4. 根据用户的问题和搜索结果，提供一个全面而有帮助的回答
"""

# `thread_id` uniquely identifies a conversation for the checkpointer; the app
# currently pins every session to thread "1".
config = {"configurable": {"thread_id": "1"}}

@tool
def get_weather_for_location(city: str) -> str:
    """Return a (stubbed) weather report for the given city.

    Placeholder implementation: every city is always reported as sunny.
    """
    logger.info(f"调用工具: get_weather_for_location, 参数: city={city}")
    report = f"{city}总是阳光明媚！"
    logger.info(f"工具返回: {report}")
    return report

@dataclass
class Context:
    """Runtime context handed to tools at invocation time.

    Carries the identity of the user the agent is acting for.
    """

    # Opaque user identifier (the app currently passes "1").
    user_id: str

@tool
def get_user_location(runtime: ToolRuntime[Context]) -> str:
    """Look up the user's location from the runtime context's user ID.

    Stub data: returns "Florida" for user_id "1", "SF" for everyone else.
    """
    user_id = runtime.context.user_id
    # BUG FIX: was `rlogger.info(...)` — an undefined name that raised
    # NameError every time this tool ran.
    logger.info(f"调用工具: get_user_location, 参数: user_id={user_id}")
    result = "Florida" if user_id == "1" else "SF"
    logger.info(f"工具返回: {result}")
    return result

@tool
def search_internet(query: str) -> str:
    """Search the internet (Baidu) and return the results as formatted text.

    Returns a header line followed by one Title/Abstract/URL section per hit.
    On failure, returns an error-message string so the agent sees the failure
    as ordinary tool output instead of crashing the run.
    """
    logger.info(f"调用工具: search, 参数: query={query}")
    try:
        results = search(query)
        # Build with str.join instead of repeated `+=` (quadratic on CPython
        # worst case, and clearer).
        parts = [f"根据查询 '{query}' 找到的信息。"]
        parts.extend(
            f"\nTitle: {result['title']}\nAbstract: {result['abstract']}\nURL: {result['url']}\n"
            for result in results
        )
        logger.info(f"工具返回: 找到 {len(results)} 条结果")
        # Removed leftover debug `print(res)` — the payload is already logged
        # and returned to the agent; printing duplicated it on stdout.
        return "".join(parts)
    except Exception as e:
        logger.error(f"搜索工具执行出错: {str(e)}")
        return f"搜索执行出错: {str(e)}"

@dataclass
class ResponseFormat:
    """代理的响应模式。"""
    # 带双关语的回应（始终必需）
    punny_response: str
    # 天气的任何有趣信息（如果有）
    weather_conditions: str | None = None

# Load LLM_BASE_URL / LLM_API_KEY / LLM_MODEL_ID from a local .env file.
load_dotenv()

model = ChatOpenAI(
    base_url=os.getenv("LLM_BASE_URL"),
    api_key=os.getenv("LLM_API_KEY"),
    model_name=os.getenv("LLM_MODEL_ID"),
)

# SECURITY FIX: removed `print(os.getenv("LLM_API_KEY"))` — it wrote the API
# key in clear text to stdout/server logs. Never echo secrets.
agent = create_agent(
    model=model,
    system_prompt=SYSTEM_PROMPT,
    tools=[get_weather_for_location, search_internet],
    context_schema=Context,
    response_format=ResponseFormat,
    checkpointer=checkpointer,
)
def init_agent():
    """Seed Streamlit session state: empty chat history and the shared agent."""
    st.session_state.setdefault("messages", [])
    st.session_state.setdefault("agent", agent)

def sidebar():
    """Render the configuration sidebar: model picker plus an API-key field."""
    with st.sidebar:
        st.title("配置")
        choice = st.selectbox("选择模型", ["OpenAI", "OpenAI Chat"])
        if choice != "OpenAI":
            return
        # NOTE(review): the entered key is currently discarded — the agent is
        # built from env vars at import time; confirm whether it should rebind.
        st.text_input("OpenAI API Key", value=os.getenv("LLM_API_KEY"), type="password")

def chat_with_agent(prompt: str):
    """Handle one chat turn: echo the user message, invoke the agent, render the reply.

    No-op when `prompt` is falsy (st.chat_input yields None until the user submits).
    Errors from the agent are surfaced in the UI instead of crashing the script.
    """
    # Guard clauses replace the original deeply nested if/else pyramid;
    # commented-out dead code was removed.
    if not prompt:
        return

    # Echo the user's message and record it in session history.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    if not st.session_state.agent:
        st.warning("请先配置API密钥以初始化Agent")
        return

    try:
        with st.chat_message("assistant"):
            with st.spinner("思考中..."):
                response = st.session_state.agent.invoke(
                    {"messages": [{"role": "user", "content": prompt}]},
                    config=config,
                    context=Context(user_id="1"),
                )
            # The agent may return a dict carrying the structured response, or
            # the structured object itself — handle both shapes.
            if isinstance(response, dict) and 'structured_response' in response:
                response_content = response['structured_response'].punny_response
            else:
                response_content = response.punny_response
            st.markdown(response_content)
        st.session_state.messages.append({"role": "assistant", "content": response_content})
    except Exception as e:
        st.error(f"调用Agent时出错: {str(e)}")


def main():
    """Streamlit entry point: render sidebar, init session state, run one chat turn."""
    sidebar()
    init_agent()

    prompt = st.chat_input("请输入您的问题...")
    chat_with_agent(prompt)


# Streamlit executes this script with __name__ == "__main__", so the guard
# preserves behavior while preventing side effects on plain import.
if __name__ == "__main__":
    main()