"""
智能体运行状态管理
"""

from typing import Any, Dict, List, Optional, TypedDict
from .planner import Plan
from .adaptive_replanner import ReplanDecision, AdaptationContext


class AgentState(TypedDict):
    """State carried by the React agent for the duration of a run.

    All optional fields start as ``None`` (or an empty container) — see
    the module's initial-state factory — and are filled in by individual
    execution stages as the run progresses.
    """
    
    # Input and output
    input: str
    output: Optional[str]
    
    # Reasoning chain (one entry appended per step)
    thoughts: List[str]
    actions: List[Dict[str, Any]]
    observations: List[str]
    
    # Step tracking: current position and hard upper bound
    current_step: int
    max_steps: int
    
    # Raw results of tool invocations
    tool_results: List[Dict[str, Any]]
    
    # Agent status flags
    is_complete: bool
    has_error: bool
    error_message: Optional[str]
    
    # Free-form metadata
    metadata: Dict[str, Any]
    
    # Session and mode information
    session_id: Optional[str]
    mode: Optional[str]
    
    # Approach selected when running in hybrid mode
    chosen_approach: Optional[str]
    
    # The plan currently being executed
    current_plan: Optional[Plan]

    # Result produced by executing the plan
    execution_result: Optional[Any]

    # Assessment of the result after plan execution
    evaluation_result: Optional[str]
    
    # Flag marking that the plan failed
    plan_failed: Optional[bool]
    
    # Replanning-system fields
    replan_decision: Optional[ReplanDecision]
    adaptation_context: Optional[AdaptationContext]
    replan_result: Optional[str]
    replan_record: Optional[Dict[str, Any]]
    replanning_attempts: Optional[int]
    
    # Reflection-system fields
    reflection_enabled: Optional[bool]
    reflection_iterations: Optional[int]
    reflection_history: Optional[List[Dict[str, Any]]]
    final_quality_score: Optional[float]
    reflection_improvements: Optional[List[str]]
    original_response: Optional[str]

def create_initial_state(input_text: str, max_steps: int = 10) -> AgentState:
    """Build a fresh :class:`AgentState` for a new agent run.

    Args:
        input_text: The user request that kicks off the run.
        max_steps: Upper bound on reasoning steps (default 10).

    Returns:
        An ``AgentState`` with empty histories, counters at zero, status
        flags cleared, and every optional field set to ``None``.
    """
    state: AgentState = {
        # Input and output
        "input": input_text,
        "output": None,
        # Reasoning chain starts empty
        "thoughts": [],
        "actions": [],
        "observations": [],
        # Step tracking
        "current_step": 0,
        "max_steps": max_steps,
        # Tool invocation results
        "tool_results": [],
        # Status flags
        "is_complete": False,
        "has_error": False,
        "error_message": None,
        # Metadata / session info
        "metadata": {},
        "session_id": None,
        "mode": None,
        "chosen_approach": None,
        # Planning fields
        "current_plan": None,
        "evaluation_result": None,
        "execution_result": None,
        "plan_failed": None,
        # Replanning fields — attempts counter starts at zero
        "replan_decision": None,
        "adaptation_context": None,
        "replan_result": None,
        "replan_record": None,
        "replanning_attempts": 0,
        # Reflection fields
        "reflection_enabled": None,
        "reflection_iterations": None,
        "reflection_history": None,
        "final_quality_score": None,
        "reflection_improvements": None,
        "original_response": None,
    }
    return state