|
|
from models.models import groq_model, anthropic_model |
|
|
from tools import taivily_search, serper_search, execute_code, get_youtube_transcript, execute_python_file_url |
|
|
from langgraph.graph import StateGraph, START, END |
|
|
from langchain_core.messages import SystemMessage, AIMessage, ToolMessage |
|
|
from typing import List, TypedDict |
|
|
from langgraph.prebuilt import ToolNode |
|
|
|
|
|
# Tool belt handed to the agent: web search (Tavily, Serper), YouTube
# transcript retrieval, and code execution. Bound to the Groq model below
# and executed by the graph's ToolNode ("action" node).
tools = [
    taivily_search,
    serper_search,
    get_youtube_transcript,
    execute_code,
    execute_python_file_url
]
|
|
|
|
|
class EvaluationState(TypedDict):
    """Shared state threaded through the evaluation workflow graph."""

    # Conversation history (System/AI/Tool messages) appended to by the nodes.
    messages: List
    # The question to be answered; interpolated into the answer/evaluator prompts.
    question: str
    # Latest answer text, written by answer_question and rewritten by map_answer.
    answer: str
    # Context injected into the answer/evaluator prompts; populated elsewhere
    # (presumably from tool results — TODO confirm against the caller).
    external_information: str
    # Set by `evaluator`: model judged the context sufficient to answer.
    has_enough_information: bool
    # Set by `validator`: answer matches the required output format.
    is_valid_answer: bool
    # Per-router retry counters (keys "validator" and "iteration") used to
    # cap the number of loops through the graph.
    step_counter: dict[str, int]
|
|
|
|
|
# Groq model with the tool schemas attached so it can emit tool calls.
bounded_model_groq = groq_model.bind_tools(tools)
|
|
|
|
|
def call_node(state: EvaluationState):
    """Invoke the tool-bound Groq model on the accumulated messages.

    The model's reply (possibly containing tool calls) is appended to the
    message history so the downstream "action" node can execute it.
    """
    ai_message = bounded_model_groq.invoke(state["messages"])
    state["messages"].append(ai_message)
    return state
|
|
|
|
|
# Prebuilt LangGraph node that executes the tool calls emitted by the agent.
tool_node = ToolNode(tools)
|
|
|
|
|
|
|
|
def answer_question(state: EvaluationState):
    """Ask the Anthropic model for the final answer.

    Builds a prompt from the question and the gathered external
    information, records the reply in the message history, and stores it
    in ``state["answer"]``.
    """
    prompt = f"""## Instruction \n I will ask you a question. Report your thoughts, and finish with only YOUR FINAL ANSWER.
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
## Question
{state["question"]}
## Relevant information
{state["external_information"]}
## answer"""

    reply = anthropic_model.invoke(prompt)

    state["messages"].append(AIMessage(content=reply.content))
    state["answer"] = reply.content
    return state
|
|
|
|
|
def map_answer(state: EvaluationState):
    """Normalize the raw answer into final-answer form via the model.

    Returns only the updated ``answer`` key; other state is untouched.
    """
    raw_answer = state["answer"]

    prompt = f"""## Instruction
map the answer to the final answer. The final answer should be a number, string or a list of numbers and/or strings. Remove quotes.
## Answer
{raw_answer}
## Final answer"""

    mapped = anthropic_model.invoke(prompt)
    return {"answer": mapped.content}
|
|
|
|
|
def validator(state: EvaluationState):
    """Check that the produced answer matches the required output format.

    Asks the Anthropic model for a yes/no verdict, stores the result in
    ``state["is_valid_answer"]``, and logs the raw verdict into the
    message history for traceability.
    """
    answer = state["answer"]

    result = anthropic_model.invoke(f"Validate if the answer fits the next requirements: \n\n{answer}\n\nThe answer should be a number, string or a list of numbers and/or strings. If the answer fits the requirements, return just 'yes', otherwise return 'no'.")

    # Fix: the model may reply "Yes" or " yes." — normalize whitespace and
    # case instead of requiring an exact lowercase "yes" prefix, which
    # silently marked valid answers as invalid.
    state["is_valid_answer"] = result.content.strip().lower().startswith("yes")
    state["messages"].append(SystemMessage(content=f"Validator: {result.content}"))

    return state
|
|
|
|
|
def route_validator(state):
    """Decide whether the graph ends or retries the answer-mapping step.

    Ends once the answer validates or after more than two validation
    attempts; otherwise loops back to ``map_answer``.
    """
    counter = state["step_counter"]
    counter["validator"] = counter.get("validator", 0) + 1

    finished = state["is_valid_answer"] or counter["validator"] > 2
    return END if finished else "map_answer"
|
|
|
|
|
def evaluator(state):
    """Judge whether the collected context suffices to answer the question.

    Sets ``has_enough_information`` from the model's yes/no verdict and
    appends the verdict to the message history.
    """
    prompt = f"""## Instruction
Answer just "yes" (without the quotes), if the context information is enough to answer the question.
## Question
{state["question"]}
## Relevant information
{state["external_information"]}
"""
    verdict = anthropic_model.invoke(prompt)

    state["has_enough_information"] = verdict.content.startswith("yes")
    state["messages"].append(SystemMessage(content=f"Evaluator: {verdict.content}"))
    return state
|
|
|
|
|
def route_iteration(state):
    """Route to answering once context suffices, or after two search loops.

    Otherwise sends the graph back to the agent for more tool use.
    """
    counter = state["step_counter"]
    counter["iteration"] = counter.get("iteration", 0) + 1

    needs_more = not state["has_enough_information"] and counter["iteration"] <= 2
    return "agent" if needs_more else "answer_question"
|
|
|
|
|
def build_workflow():
    """Compile the search/answer/validate workflow graph.

    Layout: agent -> action -> evaluator; the evaluator either loops back
    to the agent for more searching or proceeds to answer_question ->
    map_answer -> validator, where the validator may retry map_answer or
    end the run.
    """
    graph = StateGraph(EvaluationState)

    for node_name, node in (
        ("agent", call_node),
        ("action", tool_node),
        ("evaluator", evaluator),
        ("answer_question", answer_question),
        ("map_answer", map_answer),
        ("validator", validator),
    ):
        graph.add_node(node_name, node)

    graph.add_edge(START, "agent")
    graph.add_edge("agent", "action")
    graph.add_edge("action", "evaluator")

    graph.add_conditional_edges(
        "evaluator",
        route_iteration,
        {"answer_question": "answer_question", "agent": "agent"},
    )

    graph.add_edge("answer_question", "map_answer")
    graph.add_edge("map_answer", "validator")

    graph.add_conditional_edges(
        "validator",
        route_validator,
        {"map_answer": "map_answer", END: END},
    )

    return graph.compile()
|
|
|
|
|
""" if __name__ == "__main__": |
|
|
graph = build_workflow() |
|
|
|
|
|
mermaid_text = graph.get_graph().draw_mermaid() |
|
|
|
|
|
print(mermaid_text) """ |