
import asyncio
from typing import TypedDict, List, Optional, Dict, Any

from langgraph.graph import StateGraph, END
from langchain_core.messages import BaseMessage

from ...core.interfaces import OrchestratorInterface, Task, TaskResult
from ..base_agent import BaseAgent

class AgentState(TypedDict):
    """
    Defines the state that flows through the graph.

    Attributes:
        task_description: The original task description.
        steps: A list of detailed steps of task execution.
        intermediate_results: Intermediate results, with step names as keys and results as values.
        final_result: The final result of the task.
        next_agent: The name of the next agent to execute.
        error: Records any errors during task execution.
    """
    # Immutable input: the task text every agent receives.
    task_description: str
    # Audit trail of agent names in execution order (appended by _run_agent).
    steps: List[str]
    # Accumulated outputs keyed by agent name; forwarded to later agents as kwargs.
    intermediate_results: Dict[str, Any]
    # Set when the workflow produces a terminal answer; may remain None.
    final_result: Optional[str]
    # Routing hint read by the router; "FINISH" (or an unknown name) ends the graph.
    next_agent: str
    # First recorded failure message; a non-None value short-circuits routing to END.
    error: Optional[str]

class LangGraphOrchestrator(OrchestratorInterface):
    """Coordinates specialist agents as nodes of a compiled LangGraph.

    Each registered agent becomes a graph node. After a node runs, the
    router reads ``next_agent`` from the shared state and either hands
    control to that agent or terminates the graph.
    """

    def __init__(self):
        # Registered agents keyed by agent name; each becomes a node.
        self.specialists: Dict[str, BaseAgent] = {}
        # Compiled graph; built lazily on the first orchestrate_task() call.
        self.workflow = None

    def register_agent(self, agent: BaseAgent):
        """Registers a specialist agent, making it a node in the graph."""
        self.specialists[agent.name] = agent
        # Invalidate any previously compiled graph so agents registered
        # after the first run are still included in the workflow.
        self.workflow = None

    def _create_workflow(self):
        """Builds and compiles the LangGraph workflow.

        Returns:
            The compiled graph, ready for ``invoke``/``ainvoke``.
        """
        workflow = StateGraph(AgentState)

        # Shared routing table: every agent name maps to itself, and the
        # "END" sentinel returned by _router maps to the terminal node.
        route_map = {name: name for name in self.specialists} | {"END": END}

        # 1. Add all specialist agents as nodes. `agent=agent` binds the
        #    loop variable as a default argument so each lambda captures
        #    its own agent (avoids the late-binding closure pitfall).
        for name, agent in self.specialists.items():
            workflow.add_node(name, lambda state, agent=agent: self._run_agent(state, agent))

        # 2. Route from the start node to the initial agent chosen by the
        #    state's `next_agent` field (or straight to END).
        #    NOTE: "__start__" IS LangGraph's START node, so no separate
        #    set_entry_point() call is needed — calling it with
        #    "__start__" would create an invalid START -> START edge.
        workflow.add_conditional_edges("__start__", self._router, route_map)

        # 3. After every agent, re-route on the updated state. This makes
        #    multi-step handoffs via `next_agent` actually happen; a fixed
        #    edge to a pass-through node that always goes to END would
        #    terminate the workflow after a single agent.
        for name in self.specialists:
            workflow.add_conditional_edges(name, self._router, route_map)

        return workflow.compile()

    def _router(self, state: AgentState) -> str:
        """Determines the next node name from the current state.

        Returns:
            "END" on any recorded error, a missing/unknown target, or the
            explicit "FINISH" sentinel; otherwise the next agent's name.
        """
        if state.get("error"):
            return "END"

        next_agent = state.get("next_agent")
        if not next_agent or next_agent == "FINISH" or next_agent not in self.specialists:
            return "END"

        return next_agent

    def _run_agent(self, state: AgentState, agent: BaseAgent) -> AgentState:
        """Executes one agent node and folds its result into the state.

        The agent receives the task description plus all intermediate
        results so far as keyword arguments. If the agent's result is a
        dict containing "next_agent", that value drives the next hop;
        otherwise the workflow finishes.
        """
        try:
            result = agent.run(state['task_description'], **state.get('intermediate_results', {}))

            state['steps'].append(agent.name)
            state['intermediate_results'][agent.name] = result

            if isinstance(result, dict) and "next_agent" in result:
                state["next_agent"] = result["next_agent"]
            else:
                state["next_agent"] = "FINISH"

        except Exception as e:
            # Record the failure in the state; the router turns any
            # recorded error into an immediate END.
            state["error"] = f"Error in agent {agent.name}: {e}"

        return state

    async def orchestrate_task(self, task: Task) -> TaskResult:
        """Executes a task by running it through the compiled graph.

        Args:
            task: The task to execute; its description seeds the state.

        Returns:
            A TaskResult marked "failed" if any agent recorded an error,
            "completed" otherwise.
        """
        if not self.workflow:
            self.workflow = self._create_workflow()

        # TODO(review): the starting agent is hard-coded; derive it from
        # the task (or make it a constructor parameter) when possible.
        initial_agent = "resume_analyst"

        initial_state = AgentState(
            task_description=task.description,
            steps=[],
            intermediate_results={},
            final_result=None,
            next_agent=initial_agent,
            error=None
        )

        # `invoke` is synchronous; run it in a worker thread so the
        # asyncio event loop is not blocked while the graph executes
        # (the previous code called it directly, contradicting its own
        # comment and blocking the loop).
        final_state = await asyncio.to_thread(self.workflow.invoke, initial_state)

        return TaskResult(
            task_id=task.id,
            status="completed" if not final_state.get("error") else "failed",
            result=final_state.get("final_result") or str(final_state)
        )
