# Copyright (c) Microsoft. All rights reserved.

import asyncio
import logging
from typing import cast

from agent_framework import (
    MAGENTIC_EVENT_TYPE_AGENT_DELTA,
    MAGENTIC_EVENT_TYPE_ORCHESTRATOR,
    AgentRunUpdateEvent,
    ChatAgent,
    HostedCodeInterpreterTool,
    MagenticBuilder,
    MagenticHumanInterventionDecision,
    MagenticHumanInterventionKind,
    MagenticHumanInterventionReply,
    MagenticHumanInterventionRequest,
    RequestInfoEvent,
    WorkflowOutputEvent,
)
from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient

# DEBUG level surfaces the framework's internal orchestration steps; lower to
# INFO/WARNING for quieter sample output.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

"""
Sample: Magentic Orchestration + Human Plan Review

What it does:
- Builds a Magentic workflow with two agents and enables human plan review.
  A human approves or edits the plan via `RequestInfoEvent` before execution.

- researcher: ChatAgent backed by OpenAIChatClient (web/search-capable model)
- coder: ChatAgent backed by OpenAIResponsesClient with the Hosted Code Interpreter tool

Key behaviors demonstrated:
- with_plan_review(): surfaces a MagenticHumanInterventionRequest for plan review
    before coordination begins
- Event loop that waits for a RequestInfoEvent carrying the plan-review request, prints
    the plan, then replies with a MagenticHumanInterventionReply built from interactive
    console input (approve, approve with comments, revise, edit, or exit)
- Streaming output: orchestrator messages and per-agent incremental deltas are printed
    as they arrive; on_exception logs any workflow failure
- Workflow completion is detected via WorkflowOutputEvent, whose data is shown as the
    final result

Prerequisites:
- OpenAI credentials configured for `OpenAIChatClient` and `OpenAIResponsesClient`.
"""


def _prompt_for_plan_reply() -> MagenticHumanInterventionReply | None:
    """Collect a plan-review decision from the console.

    Presents the available review actions, loops until the user enters a valid
    choice, and builds the corresponding reply.

    Returns:
        The reply to send back to the workflow, or ``None`` when the user
        chose to exit.
    """
    print("Plan review options:")
    print("1. approve - Approve the plan as-is")
    print("2. approve with comments - Approve with feedback for the manager")
    print("3. revise - Request revision with your feedback")
    print("4. edit - Directly edit the plan text")
    print("5. exit - Exit the workflow")

    while True:
        choice = input("Enter your choice (1-5): ").strip().lower()
        if choice in ["approve", "1"]:
            return MagenticHumanInterventionReply(decision=MagenticHumanInterventionDecision.APPROVE)
        if choice in ["approve with comments", "2"]:
            comments = input("Enter your comments for the manager: ").strip()
            return MagenticHumanInterventionReply(
                decision=MagenticHumanInterventionDecision.APPROVE,
                comments=comments if comments else None,
            )
        if choice in ["revise", "3"]:
            comments = input("Enter feedback for revising the plan: ").strip()
            return MagenticHumanInterventionReply(
                decision=MagenticHumanInterventionDecision.REVISE,
                comments=comments if comments else None,
            )
        if choice in ["edit", "4"]:
            print("Enter your edited plan (end with an empty line):")
            lines: list[str] = []
            while True:
                line = input()
                if line == "":
                    break
                lines.append(line)
            edited_plan = "\n".join(lines)
            return MagenticHumanInterventionReply(
                decision=MagenticHumanInterventionDecision.REVISE,
                edited_plan_text=edited_plan if edited_plan else None,
            )
        if choice in ["exit", "5"]:
            return None
        print("Invalid choice. Please enter a number 1-5.")


async def main() -> None:
    """Build and run a two-agent Magentic workflow with human plan review.

    A researcher agent and a coder agent are coordinated by a manager agent.
    Before coordination begins, the plan is surfaced via a
    ``MagenticHumanInterventionRequest`` and the user decides (approve /
    comment / revise / edit / exit) through the console. Streaming events are
    printed as they arrive, and the final ``WorkflowOutputEvent`` data is shown
    as the result.
    """
    researcher_agent = ChatAgent(
        name="ResearcherAgent",
        description="Specialist in research and information gathering",
        instructions=(
            "You are a Researcher. You find information without additional computation or quantitative analysis."
        ),
        # This agent requires the gpt-4o-search-preview model to perform web searches.
        # Feel free to explore with other agents that support web search, for example,
        # the `OpenAIResponseAgent` or `AzureAgentProtocol` with bing grounding.
        chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"),
    )

    coder_agent = ChatAgent(
        name="CoderAgent",
        description="A helpful assistant that writes and executes code to process and analyze data.",
        instructions="You solve questions using code. Please provide detailed analysis and computation process.",
        chat_client=OpenAIResponsesClient(),
        tools=HostedCodeInterpreterTool(),
    )

    # Create a manager agent for the orchestration.
    manager_agent = ChatAgent(
        name="MagenticManager",
        description="Orchestrator that coordinates the research and coding workflow",
        instructions="You coordinate a team to complete complex tasks efficiently.",
        chat_client=OpenAIChatClient(),
    )

    def on_exception(exception: Exception) -> None:
        """Log a workflow failure with its traceback."""
        print(f"Exception occurred: {exception}")
        logger.exception("Workflow exception", exc_info=exception)

    # Console formatting state: which agent currently owns the open stream
    # line, and whether such a line is open (i.e. printed without a newline).
    last_stream_agent_id: str | None = None
    stream_line_open: bool = False

    print("\nBuilding Magentic Workflow...")

    workflow = (
        MagenticBuilder()
        .participants(researcher=researcher_agent, coder=coder_agent)
        .with_standard_manager(
            agent=manager_agent,
            max_round_count=10,
            max_stall_count=3,
            max_reset_count=2,
        )
        .with_plan_review()
        .build()
    )

    task = (
        "I am preparing a report on the energy efficiency of different machine learning model architectures. "
        "Compare the estimated training and inference energy consumption of ResNet-50, BERT-base, and GPT-2 "
        "on standard datasets (e.g., ImageNet for ResNet, GLUE for BERT, WebText for GPT-2). "
        "Then, estimate the CO2 emissions associated with each, assuming training on an Azure Standard_NC6s_v3 "
        "VM for 24 hours. Provide tables for clarity, and recommend the most energy-efficient model "
        "per task type (image classification, text classification, and text generation)."
    )

    print(f"\nTask: {task}")
    print("\nStarting workflow execution...")

    try:
        pending_request: RequestInfoEvent | None = None
        pending_responses: dict[str, MagenticHumanInterventionReply] | None = None
        completed = False
        workflow_output: str | None = None

        # Each outer iteration is one streaming pass: the initial run, then one
        # pass per human reply, until a WorkflowOutputEvent arrives.
        while not completed:
            if pending_responses is not None:
                stream = workflow.send_responses_streaming(pending_responses)
            else:
                stream = workflow.run_stream(task)

            async for event in stream:
                if isinstance(event, AgentRunUpdateEvent):
                    props = event.data.additional_properties if event.data else None
                    event_type = props.get("magentic_event_type") if props else None

                    if event_type == MAGENTIC_EVENT_TYPE_ORCHESTRATOR:
                        # Close any in-progress agent stream line first so the
                        # orchestrator message is not glued onto it and the next
                        # agent delta re-prints its [STREAM:...] header.
                        if stream_line_open:
                            print()
                            stream_line_open = False
                            last_stream_agent_id = None
                        kind = props.get("orchestrator_message_kind", "") if props else ""
                        text = event.data.text if event.data else ""
                        print(f"\n[ORCH:{kind}]\n\n{text}\n{'-' * 26}")
                    elif event_type == MAGENTIC_EVENT_TYPE_AGENT_DELTA:
                        agent_id = props.get("agent_id", "unknown") if props else "unknown"
                        # Start a new labeled line whenever the streaming agent changes.
                        if last_stream_agent_id != agent_id or not stream_line_open:
                            if stream_line_open:
                                print()
                            print(f"\n[STREAM:{agent_id}]: ", end="", flush=True)
                            last_stream_agent_id = agent_id
                            stream_line_open = True
                        if event.data and event.data.text:
                            print(event.data.text, end="", flush=True)
                elif isinstance(event, RequestInfoEvent) and event.request_type is MagenticHumanInterventionRequest:
                    request = cast(MagenticHumanInterventionRequest, event.data)
                    if request.kind == MagenticHumanInterventionKind.PLAN_REVIEW:
                        # Defer the interactive prompt until the stream drains.
                        pending_request = event
                        if request.plan_text:
                            print(f"\n=== PLAN REVIEW REQUEST ===\n{request.plan_text}\n")
                elif isinstance(event, WorkflowOutputEvent):
                    # Capture workflow output during streaming.
                    workflow_output = str(event.data) if event.data else None
                    completed = True

            if stream_line_open:
                print()
                stream_line_open = False
            pending_responses = None

            # Handle a pending plan review request by asking the human.
            if pending_request is not None:
                reply = _prompt_for_plan_reply()
                if reply is None:
                    print("Exiting workflow...")
                    return
                pending_responses = {pending_request.request_id: reply}
                pending_request = None

        # Show final result from the captured workflow output.
        if workflow_output:
            print(f"Workflow completed with result:\n\n{workflow_output}")

    except Exception as e:
        print(f"Workflow execution failed: {e}")
        on_exception(e)


if __name__ == "__main__":
    # Entry point: run the async sample to completion.
    asyncio.run(main())
