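# LangChain OpenAI-functions agent that can return a structured "Response" object.
# On each step the model either calls a tool or "calls" the Response schema; the
# custom parser below decides which of the two happened and routes accordingly.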
from dotenv import load_dotenv

load_dotenv()
from typing import List
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from pydantic import BaseModel, Field
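# Pydantic models that double as OpenAI function schemas: FrontendActions describes
# buttons/links the frontend chatbot can render, and Response is the final structured answer.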
class FrontendActions(BaseModel):
    """Commands and options that can be passed to the frontend chatbot to elicit a response or student action"""

    label: str = Field(description="Label which will appear on the frontend chatbot button. Omit this if you are just making a server request for more information")
    link: str = Field(description="Link to take the user to a different place.")
class Response(BaseModel):
    """Final response to the question being asked. This will be passed to the frontend chatbot for processing"""

    message: str = Field(description="The final answer to respond to the user")
    tokens: int = Field(description="The number of tokens used to produce the response")
    # actions: List[FrontendActions] = Field(description="List of actions taken to produce the response.")
import json
from langchain_core.agents import AgentActionMessageLog, AgentFinish
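# Custom output parser: if the model invoked the Response function, finish with its
# structured arguments; if it invoked any other function, run it as a tool action;
# otherwise return the plain-text answer.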
def parse(output):
    # If no function was invoked, return to user
    if "function_call" not in output.additional_kwargs:
        return AgentFinish(return_values={"output": output.content}, log=output.content)

    # Parse out the function call
    function_call = output.additional_kwargs["function_call"]
    name = function_call["name"]
    inputs = json.loads(function_call["arguments"])

    # If the Response function was invoked, return to the user with the function inputs
    if name == "Response":
        return AgentFinish(return_values=inputs, log=str(function_call))
    # Otherwise, return an agent action
    else:
        return AgentActionMessageLog(
            tool=name, tool_input=inputs, log="", message_log=[output]
        )
from langchain.agents import tool
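# The agent needs at least one tool bound to it; this stub satisfies that requirement
# until real tools are added.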
@tool
def placeholder():
    """This is just a placeholder function"""
    return "placeholder"


tools = [placeholder]
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
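# Prompt: system instructions, the user's input, and a scratchpad slot where the
# agent's intermediate tool calls and observations are injected on each turn.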
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Please only make two suggestions at a time, and output a JSON object using the response schema provided to you in the associated tool. If you suggest objectives and goals, please make them actions in the schema with the link 'plan'"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)
llm = ChatOpenAI(model="gpt-4", temperature=0)
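# Bind both the placeholder tool and the Response schema to the model as OpenAI
# functions, so the model can either call the tool or "call" Response to finish
# with structured output.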
llm_with_tools = llm.bind(
    functions=[
        # The placeholder tool
        format_tool_to_openai_function(placeholder),
        # Response schema
        convert_pydantic_to_openai_function(Response),
    ]
)
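# Compose the agent as an LCEL pipeline: map the inputs, apply the prompt, call the
# model with the bound functions, then route the result through the custom parser.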
agent = (
    {
        "input": lambda x: x["input"],
        # Format agent scratchpad from intermediate steps
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools
    | parse
)
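# Run the agent loop: the executor repeatedly calls the agent and its tools until the
# parser returns an AgentFinish (either the Response schema or a plain-text answer).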
agent_executor = AgentExecutor(tools=tools, agent=agent, verbose=True)
agent_executor.invoke(
    {"input": "Can you suggest to me some actions I could take to become a teacher?"},
    return_only_outputs=True,
)