import json
from uuid import UUID

from dynamiq import Workflow
from dynamiq.callbacks import TracingCallbackHandler
from dynamiq.callbacks.tracing import Run
from dynamiq.flows import Flow
from dynamiq.nodes.agents import Agent
from dynamiq.prompts import (
    Message,
    MessageRole,
    VisionMessage,
    VisionMessageImageContent,
    VisionMessageImageURL,
    VisionMessageTextContent,
)
from dynamiq.runnables import RunnableConfig
from dynamiq.utils import JsonWorkflowEncoder
from examples.llm_setup import setup_llm

# Constants
AGENT_NAME = "Art Agent"
AGENT_ROLE = "Professional writer with the goal of producing well-written and informative responses about art"
INPUT_QUESTION = "Describe main idea of this piece of art."
IMAGE_URL = "IMAGE_URL"

GENERAL_INPUT_QUESTION = "Describe the main idea or central theme of the Mona Lisa painting."


def run_reflection_agent_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using an Agent backed by an Anthropic Claude model.

    The agent answers a general art question using a plain text `Message`
    templated with Jinja-style placeholders that are filled from `input_data`.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during the workflow execution
            (empty dict on error).
    """
    # Set up the Claude connection and language model via the shared example helper.
    llm = setup_llm(model_provider="claude", model_name="claude-3-5-sonnet-20240620")
    agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        # {{context}} and {{request}} are rendered from the wf.run input_data below.
        context_template="You are helpful assistant that answers on question about art."
        "Take into account style of response: {{context}}",
        input_message=Message(content="User request: {{request}}"),
    )

    # Set up tracing and create the workflow
    tracing = TracingCallbackHandler()
    wf = Workflow(flow=Flow(nodes=[agent]))

    # Run the workflow and handle the result
    try:
        result = wf.run(
            input_data={
                "request": GENERAL_INPUT_QUESTION,
                "context": "Keep answer short and simple",
            },
            config=RunnableConfig(callbacks=[tracing]),
        )

        # Verify that traces can be serialized to JSON
        json.dumps(
            {"runs": [run.to_dict() for run in tracing.runs.values()]},
            cls=JsonWorkflowEncoder,
        )

        return result.output[agent.id]["output"]["content"], tracing.runs
    except Exception as e:
        # Example-level best-effort handling: report and return empty results.
        print(f"An error occurred: {e}")
        return "", {}


def run_react_agent_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using an Agent backed by a Gemini model.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during execution (empty dict on error).
    """
    # Build the Gemini-backed agent with a templated text message.
    llm = setup_llm(model_provider="gemini", model_name="gemini-2.0-flash")
    art_agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        context_template=(
            "You are helpful assistant that answers on question about art."
            "Take into account style of response: {{context}}"
        ),
        input_message=Message(content="User request: {{request}}"),
    )

    trace_handler = TracingCallbackHandler()
    workflow = Workflow(flow=Flow(nodes=[art_agent]))

    try:
        run_result = workflow.run(
            input_data={
                "request": GENERAL_INPUT_QUESTION,
                "context": "Keep answer short and simple",
            },
            config=RunnableConfig(callbacks=[trace_handler]),
        )

        # Sanity-check that the collected traces are JSON-serializable.
        trace_payload = {"runs": [r.to_dict() for r in trace_handler.runs.values()]}
        json.dumps(trace_payload, cls=JsonWorkflowEncoder)

        content = run_result.output[art_agent.id]["output"]["content"]
        return content, trace_handler.runs
    except Exception as e:
        print(f"An error occurred: {e}")
        return "", {}


def run_simple_agent_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using a simple Agent backed by a Gemini model.

    Unlike the other workflows in this module, this agent defines no
    `context_template`, so only the `request` placeholder is rendered.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during execution (empty dict on error).
    """
    # Set up the Gemini connection and language model
    llm = setup_llm(model_provider="gemini", model_name="gemini-2.0-flash")
    agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        input_message=Message(content="User request: {{request}}"),
    )

    # Set up tracing and create the workflow
    tracing = TracingCallbackHandler()
    wf = Workflow(flow=Flow(nodes=[agent]))

    # Run the workflow and handle the result
    try:
        result = wf.run(
            # No "context" key: this agent has no context_template that would
            # consume it, so passing one was dead input.
            input_data={"request": GENERAL_INPUT_QUESTION},
            config=RunnableConfig(callbacks=[tracing]),
        )

        # Verify that traces can be serialized to JSON
        json.dumps(
            {"runs": [run.to_dict() for run in tracing.runs.values()]},
            cls=JsonWorkflowEncoder,
        )

        return result.output[agent.id]["output"]["content"], tracing.runs
    except Exception as e:
        print(f"An error occurred: {e}")
        return "", {}


def run_reflection_agent_vision_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using an Agent backed by a Gemini model with a vision message.

    The agent receives a `VisionMessage` combining an image URL and a text
    question; both are templated and filled from `input_data`.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during the workflow execution
            (empty dict on error).
    """
    # Set up the Gemini connection and language model via the shared example helper.
    llm = setup_llm(model_provider="gemini", model_name="gemini-2.0-flash")
    agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        context_template="You are helpful assistant that answers on question about art."
        "Take into account style of response: {{context}}",
        # Image first, then the textual question; {{ url }} and {{ request }}
        # are rendered from the wf.run input_data below.
        input_message=VisionMessage(
            content=[
                VisionMessageImageContent(image_url=VisionMessageImageURL(url="{{ url }}")),
                VisionMessageTextContent(text="{{ request }}"),
            ],
            role=MessageRole.USER,
        ),
    )

    # Set up tracing and create the workflow
    tracing = TracingCallbackHandler()
    wf = Workflow(flow=Flow(nodes=[agent]))

    # Run the workflow and handle the result
    try:
        result = wf.run(
            input_data={
                "request": INPUT_QUESTION,
                "url": IMAGE_URL,
                "context": "Keep answer short and simple",
            },
            config=RunnableConfig(callbacks=[tracing]),
        )

        # Verify that traces can be serialized to JSON
        json.dumps(
            {"runs": [run.to_dict() for run in tracing.runs.values()]},
            cls=JsonWorkflowEncoder,
        )

        return result.output[agent.id]["output"]["content"], tracing.runs
    except Exception as e:
        # Example-level best-effort handling: report and return empty results.
        print(f"An error occurred: {e}")
        return "", {}


def run_react_agent_vision_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using an Agent backed by a Gemini model with a vision message.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during execution (empty dict on error).
    """
    # Gemini-backed agent that receives an image URL plus a text question.
    llm = setup_llm(model_provider="gemini", model_name="gemini-2.0-flash")
    vision_message = VisionMessage(
        content=[
            VisionMessageImageContent(image_url=VisionMessageImageURL(url="{{ url }}")),
            VisionMessageTextContent(text="{{ request }}"),
        ],
        role=MessageRole.USER,
    )
    art_agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        context_template=(
            "You are helpful assistant that answers on question about art."
            "Take into account style of response: {{context}}"
        ),
        input_message=vision_message,
    )

    trace_handler = TracingCallbackHandler()
    workflow = Workflow(flow=Flow(nodes=[art_agent]))

    try:
        run_result = workflow.run(
            input_data={
                "request": INPUT_QUESTION,
                "url": IMAGE_URL,
                "context": "Keep answer short and simple",
            },
            config=RunnableConfig(callbacks=[trace_handler]),
        )

        # Sanity-check that the collected traces are JSON-serializable.
        trace_payload = {"runs": [r.to_dict() for r in trace_handler.runs.values()]}
        json.dumps(trace_payload, cls=JsonWorkflowEncoder)

        content = run_result.output[art_agent.id]["output"]["content"]
        return content, trace_handler.runs
    except Exception as e:
        print(f"An error occurred: {e}")
        return "", {}


def run_simple_agent_vision_message_workflow() -> tuple[str, dict[UUID, Run]]:
    """
    Set up and run a workflow using an Agent backed by a Gemini model with a vision message.

    Returns:
        str: The output content generated by the agent, or an empty string if an error occurs.
        dict[UUID, Run]: Tracing runs collected during execution (empty dict on error).
    """
    # Gemini-backed agent answering an image-plus-text question.
    llm = setup_llm(model_provider="gemini", model_name="gemini-2.0-flash")
    vision_message = VisionMessage(
        content=[
            VisionMessageImageContent(image_url=VisionMessageImageURL(url="{{ url }}")),
            VisionMessageTextContent(text="{{ request }}"),
        ],
        role=MessageRole.USER,
    )
    art_agent = Agent(
        name=AGENT_NAME,
        llm=llm,
        id="agent",
        verbose=True,
        context_template=(
            "You are helpful assistant that answers on question about art."
            "Take into account style of response: {{context}}"
        ),
        input_message=vision_message,
    )

    trace_handler = TracingCallbackHandler()
    workflow = Workflow(flow=Flow(nodes=[art_agent]))

    try:
        run_result = workflow.run(
            input_data={
                "request": INPUT_QUESTION,
                "url": IMAGE_URL,
                "context": "Keep answer short and simple",
            },
            config=RunnableConfig(callbacks=[trace_handler]),
        )

        # Sanity-check that the collected traces are JSON-serializable.
        trace_payload = {"runs": [r.to_dict() for r in trace_handler.runs.values()]}
        json.dumps(trace_payload, cls=JsonWorkflowEncoder)

        content = run_result.output[art_agent.id]["output"]["content"]
        return content, trace_handler.runs
    except Exception as e:
        print(f"An error occurred: {e}")
        return "", {}


if __name__ == "__main__":
    # Run every example workflow in order and print each agent's answer.
    for workflow_fn in (
        run_reflection_agent_message_workflow,
        run_react_agent_message_workflow,
        run_simple_agent_message_workflow,
        run_reflection_agent_vision_message_workflow,
        run_react_agent_vision_message_workflow,
        run_simple_agent_vision_message_workflow,
    ):
        output, _ = workflow_fn()
        print(output)
