from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_log_to_str, format_log_to_messages
from langchain.agents.output_parsers import ReActSingleInputOutputParser, JSONAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.tools import Tool
from langchain.tools.render import render_text_description
from langchain.utilities.serpapi import SerpAPIWrapper
# Shared SerpAPI search client used by both agent variants below.
# NOTE(review): instantiated at import time — this presumably requires a
# SERPAPI_API_KEY in the environment; confirm before importing this module
# from anywhere other than this demo script.
search = SerpAPIWrapper()
# Single-tool toolbox handed to both AgentExecutors; the agent selects the
# tool by its "Current Search" name.
tools = [
    Tool(
        name="Current Search",
        func=search.run,
        description="useful for when you need to answer questions about current events or the current state of the world",
    ),
]


def conversational_with_openai(query: str = "what are some movies showing 9/21/2023?") -> str:
    """Run a conversational ReAct agent backed by the OpenAI completion LLM.

    Builds an LCEL pipeline: input-mapping dict -> hub prompt
    ("hwchase17/react-chat") -> LLM bound with an "Observation" stop sequence
    -> single-input ReAct output parser. Conversation state is kept in a
    string-based ConversationBufferMemory under the "chat_history" key.

    Args:
        query: User message to send to the agent. Defaults to the original
            hard-coded demo question.

    Returns:
        The agent's final answer string (also printed, preserving the
        original script's console behavior).
    """
    llm = OpenAI(temperature=0)
    # The hub prompt expects {tools}, {tool_names}, {input}, {chat_history},
    # and {agent_scratchpad}; the first two are filled in up front.
    prompt = hub.pull("hwchase17/react-chat")
    prompt = prompt.partial(
        tools=render_text_description(tools),
        tool_names=", ".join(t.name for t in tools),
    )

    # Stop before the model fabricates its own "Observation:" line — the real
    # observation is injected by the executor after the tool runs.
    llm_with_stop = llm.bind(stop=["\nObservation"])

    agent = (
        {
            "input": lambda x: x["input"],
            # Completion models take the scratchpad as one flat string.
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
            "chat_history": lambda x: x["chat_history"],
        }
        | prompt
        | llm_with_stop
        | ReActSingleInputOutputParser()
    )
    memory = ConversationBufferMemory(memory_key="chat_history")
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)
    output = agent_executor.invoke({"input": query})["output"]
    print(output)
    return output


def conversation_with_chat_model(query: str = "hi, i am bob") -> str:
    """Run a conversational JSON-ReAct agent backed by the OpenAI chat model.

    Mirrors :func:`conversational_with_openai` but uses the chat-oriented hub
    prompt ("hwchase17/react-chat-json"), a message-list scratchpad, a custom
    tool-response template to keep the model emitting JSON blobs, and the JSON
    agent output parser. Memory returns messages (not a flat string) so the
    chat prompt receives proper message objects.

    Args:
        query: User message to send to the agent. Defaults to the original
            hard-coded demo greeting.

    Returns:
        The agent's final answer string (also printed, preserving the
        original script's console behavior).
    """
    chat_model = ChatOpenAI(temperature=0)
    prompt = hub.pull("hwchase17/react-chat-json")
    prompt = prompt.partial(
        tools=render_text_description(tools),
        tool_names=", ".join(t.name for t in tools),
    )
    # Stop before the model fabricates its own "Observation:" line.
    chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
    # We need some extra steering, or the chat model forgets how to respond
    # sometimes. (Runtime prompt text — kept byte-identical.)
    TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: 
    ---------------------
    {observation}

    USER'S INPUT
    --------------------

    Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else - even if you just want to respond to the user. Do NOT respond with anything except a JSON snippet no matter what!"""

    agent = (
        {
            "input": lambda x: x["input"],
            # Chat models take the scratchpad as a list of messages; each tool
            # observation is wrapped in the steering template above.
            "agent_scratchpad": lambda x: format_log_to_messages(
                x["intermediate_steps"], template_tool_response=TEMPLATE_TOOL_RESPONSE
            ),
            "chat_history": lambda x: x["chat_history"],
        }
        | prompt
        | chat_model_with_stop
        | JSONAgentOutputParser()
    )
    # return_messages=True so history is replayed as message objects, matching
    # what the chat prompt expects.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)
    output = agent_executor.invoke({"input": query})["output"]
    print(output)
    return output


if __name__ == "__main__":
    # Demo entry point: exercise the chat-model variant of the agent.
    conversation_with_chat_model()