
from langchain_core.tools import tool
from dataclasses import dataclass, field
from typing import Optional, List, Dict
from loguru import logger
from pydantic import ValidationError

from langgraph.graph.state import CompiledStateGraph
from core.agents.plan import build_plan_team_graph
from core.agents.plan.model import ThoughtData


@dataclass
class AppContext:
    """Holds shared application resources, like the plan team graph.

    Tracks the full sequential-thinking history plus any exploration
    branches, keyed by their branch id.
    """
    # Compiled LangGraph state graph used to process each thought.
    # (Annotations are quoted forward refs so the class does not require the
    # heavyweight imports to be resolvable at definition time.)
    team: "CompiledStateGraph"
    # Every thought ever recorded, in call order (main line and branches).
    thought_history: "List[ThoughtData]" = field(default_factory=list)
    # branchId -> thoughts recorded on that branch, in call order.
    branches: "Dict[str, List[ThoughtData]]" = field(default_factory=dict)

    def add_thought(self, thought: "ThoughtData") -> None:
        """Append *thought* to the history and, if it branches, to its branch.

        A thought belongs to a branch only when BOTH `branchFromThought` and
        `branchId` are set; otherwise it is part of the main line only.
        """
        self.thought_history.append(thought)

        if thought.branchFromThought is not None and thought.branchId is not None:
            # setdefault creates the branch bucket on first use.
            self.branches.setdefault(thought.branchId, []).append(thought)

    def get_branch_thoughts(self, branch_id: str) -> "List[ThoughtData]":
        """Return all thoughts recorded for *branch_id* ([] if unknown)."""
        return self.branches.get(branch_id, [])

    def get_all_branches(self) -> Dict[str, int]:
        """Map each branch id to the number of thoughts it contains."""
        return {branch_id: len(thoughts) for branch_id, thoughts in self.branches.items()}

app_context: Optional[AppContext] = None


def _find_thought_text(thought_number: int, fallback: str) -> str:
    """Return the text of the earlier thought numbered *thought_number*.

    Searches the shared history, excluding the most recently appended entry
    (the thought currently being processed). Returns *fallback* if the number
    is not found or the context is unavailable.
    """
    if app_context is None:
        return fallback
    for hist_thought in app_context.thought_history[:-1]:
        if hist_thought.thoughtNumber == thought_number:
            return hist_thought.thought
    return fallback


def _extract_response_text(team_response) -> str:
    """Best-effort extraction of the coordinator's final text from a team run.

    `CompiledStateGraph.ainvoke` returns the final graph state as a dict
    (typically holding a "messages" list of chat messages); other runners may
    instead return an object exposing `.content`. Returns "" when no usable
    text is found, so the tool never propagates None.
    """
    if isinstance(team_response, dict):
        messages = team_response.get("messages") or []
        if not messages:
            return ""
        last_message = messages[-1]
        content = getattr(last_message, "content", last_message)
        return "" if content is None else str(content)
    content = getattr(team_response, "content", None)
    return "" if content is None else str(content)


@tool
async def sequentialthinking(
        thought: str,
        thoughtNumber: int,
        totalThoughts: int,
        nextThoughtNeeded: bool,
        isRevision: bool = False,
        revisesThought: Optional[int] = None,
        branchFromThought: Optional[int] = None,
        branchId: Optional[str] = None,
        needsMoreThoughts: bool = False
) -> str:
    """
    A detailed tool for dynamic and reflective problem-solving through thoughts.

    This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
    Each thought can build on, question, or revise previous insights as understanding deepens.
    It uses a LangGraph multi-agent team (in coordinate mode) to process each thought, where a
    Coordinator delegates sub-tasks to specialists (Planner, Researcher, Analyzer, Critic, Synthesizer)
    and synthesizes their outputs.

    When to use this tool:
    - Breaking down complex problems into manageable steps.
    - Planning and design processes requiring iterative refinement and revision.
    - Complex analysis where the approach might need course correction based on findings.
    - Problems where the full scope or optimal path is not clear initially.
    - Situations requiring a multi-step solution with context maintained across steps.
    - Tasks where focusing on relevant information and filtering out noise is crucial.
    - Developing and verifying solution hypotheses through a chain of reasoning.

    Key features & usage guidelines:
    - The process is driven by the caller (e.g., an LLM) making sequential calls to this tool.
    - Start with an initial estimate for `totalThoughts`, but adjust it dynamically via subsequent calls if needed.
    - Use `isRevision=True` and `revisesThought` to explicitly revisit and correct previous steps.
    - Use `branchFromThought` and `branchId` to explore alternative paths or perspectives.
    - If the estimated `totalThoughts` is reached but more steps are needed, set `needsMoreThoughts=True` on the *last* thought within the current estimate to signal the need for extension.
    - Express uncertainty and explore alternatives within the `thought` content.
    - Generate solution hypotheses within the `thought` content when appropriate.
    - Verify hypotheses in subsequent `thought` steps based on the reasoning chain.
    - The caller should repeat the process, calling this tool for each step, until a satisfactory solution is reached.
    - Set `nextThoughtNeeded=False` only when the caller determines the process is complete and a final answer is ready.

    Parameters:
        thought (str): The content of the current thinking step. This can be an analytical step,
                       a plan, a question, a critique, a revision, a hypothesis, or verification.
                       Make it specific enough to imply the desired action.
        thoughtNumber (int): The sequence number of this thought (>=1). Can exceed initial `totalThoughts`
                             if the process is extended.
        totalThoughts (int): The current *estimate* of the total thoughts required for the process.
                             This can be adjusted by the caller in subsequent calls. Minimum 5 suggested.
        nextThoughtNeeded (bool): Indicates if the caller intends to make another call to this tool
                                  after the current one. Set to False only when the entire process is deemed complete.
        isRevision (bool, optional): True if this thought revises or corrects a previous thought. Defaults to False.
        revisesThought (int, optional): The `thoughtNumber` of the thought being revised, required if `isRevision` is True.
                                        Must be less than the current `thoughtNumber`.
        branchFromThought (int, optional): The `thoughtNumber` from which this thought branches to explore an alternative path.
                                           Defaults to None.
        branchId (str, optional): A unique identifier for the branch being explored, required if `branchFromThought` is set.
                                  Defaults to None.
        needsMoreThoughts (bool, optional): Set to True on a thought if the caller anticipates needing more
                                            steps beyond the current `totalThoughts` estimate *after* this thought.
                                            Defaults to False.

    Returns:
        str: The Coordinator agent's synthesized response based on specialist contributions for the current `thought`.
             Includes guidance for the caller on potential next steps (e.g., suggestions for revision or branching
             based on the specialists' analysis). The caller uses this response to formulate the *next* thought.
    """
    global app_context
    if not app_context or not app_context.team:
        logger.error("Application context or Agno team not initialized during tool call.")
        # The lifespan hook should have created the context; fall back to a
        # lazy re-initialization so a mis-ordered startup does not break every
        # subsequent tool call.
        logger.warning("Attempting to re-initialize team due to missing context...")
        try:
            team = build_plan_team_graph()
            app_context = AppContext(team=team)  # Re-create the shared context
            logger.info("Successfully re-initialized team and context.")
        except Exception as init_err:
            # loguru has no `exc_info` kwarg; opt(exception=True) attaches the
            # traceback to the log record instead.
            logger.opt(exception=True).critical(
                f"Failed to re-initialize Agno team during tool call: {init_err}")
            # Return only the error message string so the caller sees a result.
            return f"Critical Error: Application context not available and re-initialization failed: {init_err}"

    try:
        # --- Validation ---
        # ThoughtData validates every field on construction (and may bump
        # totalThoughts up to its configured minimum); a bad call raises
        # ValidationError, handled below.
        current_input_thought = ThoughtData(
            thought=thought,
            thoughtNumber=thoughtNumber,
            totalThoughts=totalThoughts,
            nextThoughtNeeded=nextThoughtNeeded,
            isRevision=isRevision,
            revisesThought=revisesThought,
            branchFromThought=branchFromThought,
            branchId=branchId,
            needsMoreThoughts=needsMoreThoughts
        )

        # Use the validated (possibly adjusted) estimate from the model.
        adjusted_total_thoughts = current_input_thought.totalThoughts

        # Force the "continue" flag off once the estimate is exhausted, unless
        # the caller explicitly requested an extension. The (frozen) pydantic
        # model is left untouched; the adjusted local flag is used everywhere
        # below instead of rebuilding the instance.
        adjusted_next_thought_needed = current_input_thought.nextThoughtNeeded
        if current_input_thought.thoughtNumber >= adjusted_total_thoughts and not current_input_thought.needsMoreThoughts:
            adjusted_next_thought_needed = False
        if adjusted_next_thought_needed != current_input_thought.nextThoughtNeeded:
            logger.info(f"Adjusting nextThoughtNeeded from {current_input_thought.nextThoughtNeeded} to {adjusted_next_thought_needed} based on thoughtNumber/totalThoughts.")

        final_thought_data = current_input_thought

        # Record the validated thought (also files it under its branch, if any).
        app_context.add_thought(final_thought_data)

        # --- Process Thought with Team (Coordinate Mode) ---
        logger.info(f"Passing thought #{final_thought_data.thoughtNumber} to the Coordinator...")

        # Revision/branch context is embedded directly in the prompt so the
        # coordinator sees it without needing access to our history object.
        input_prompt = f"Process Thought #{final_thought_data.thoughtNumber}:\n"
        if final_thought_data.isRevision and final_thought_data.revisesThought is not None:
            original_thought_text = _find_thought_text(
                final_thought_data.revisesThought, "Unknown Original Thought")
            input_prompt += f"**This is a REVISION of Thought #{final_thought_data.revisesThought}** (Original: \"{original_thought_text}\").\n"
        elif final_thought_data.branchFromThought is not None and final_thought_data.branchId is not None:
            branch_point_text = _find_thought_text(
                final_thought_data.branchFromThought, "Unknown Branch Point")
            input_prompt += f"**This is a BRANCH (ID: {final_thought_data.branchId}) from Thought #{final_thought_data.branchFromThought}** (Origin: \"{branch_point_text}\").\n"

        input_prompt += f"\nThought Content: \"{final_thought_data.thought}\""

        # Run the compiled graph; the coordinator node handles delegation.
        # ainvoke returns the final graph state (a dict), not a message object.
        team_response = await app_context.team.ainvoke({"messages": input_prompt})
        coordinator_response = _extract_response_text(team_response)

        logger.info(f"Coordinator finished processing thought #{final_thought_data.thoughtNumber}.")
        logger.debug(f"Coordinator Raw Response:\n{coordinator_response}")

        # --- Guidance for the caller's next step ---
        # Use the *potentially adjusted* flag so guidance matches what we report.
        if not adjusted_next_thought_needed:
            # Keep the message for the final thought concise.
            additional_guidance = "\n\nThis is the final thought. Review the Coordinator's final synthesis."
        else:
            additional_guidance = (
                "\n\nGuidance for next step:"
                "\n- **Revision/Branching:** Look for 'RECOMMENDATION: Revise thought #X...' or 'SUGGESTION: Consider branching...' in the response."
                " Use `isRevision=True`/`revisesThought=X` for revisions or `branchFromThought=Y`/`branchId='...'` for branching accordingly."
                "\n- **Next Thought:** Based on the Coordinator's response, formulate the next logical thought, addressing any points raised."
            )

        # --- Build Result ---
        result_data = {
            "processedThoughtNumber": final_thought_data.thoughtNumber,
            "estimatedTotalThoughts": final_thought_data.totalThoughts,  # validated value
            "nextThoughtNeeded": adjusted_next_thought_needed,  # potentially adjusted value
            "coordinatorResponse": coordinator_response + additional_guidance,
            "branches": list(app_context.branches.keys()),
            "thoughtHistoryLength": len(app_context.thought_history),
            "branchDetails": {
                "currentBranchId": final_thought_data.branchId if final_thought_data.branchFromThought is not None else "main",
                "branchOriginThought": final_thought_data.branchFromThought,
                "allBranches": app_context.get_all_branches()  # include counts
            },
            "isRevision": final_thought_data.isRevision,
            "revisesThought": final_thought_data.revisesThought if final_thought_data.isRevision else None,
            "isBranch": final_thought_data.branchFromThought is not None,
            "status": "success"
        }

        # Only the synthesized text (plus guidance) goes back to the caller.
        logger.info(f"=== 思考结果 ===\n{result_data['coordinatorResponse']}")
        return result_data["coordinatorResponse"]

    except ValidationError as e:
        logger.error(f"Validation Error processing tool call: {e}")
        # Return only the error message string
        return f"Input validation failed: {e}"
    except Exception as e:
        logger.exception("Error processing tool call")  # logs the full traceback
        # Return only the error message string
        return f"An unexpected error occurred: {str(e)}"

