from datetime import datetime
import random

import numpy as np

from config import PHYSICAL_CONSTRAINTS, ENVIRONMENT_CONFIG
from graph import WisdomGraph, SpaceType
from spaces.action import ActionSpace
from spaces.perception import PerceptionSpace
from spaces.planning import PlanningSpace
from spaces.reasoning import ReasoningSpace

class Agent:
    """Agent with four cognitive spaces as defined in the paper"""
    def __init__(self) -> None:
        """Initialize agent with default settings.

        Sets up world geometry, agent state, the four cognitive spaces,
        the wisdom graph, and Q-learning hyperparameters. Takes no
        arguments; everything starts from fixed defaults.
        """
        # World parameters
        self.world_size = (20, 20)  # Default world size (width, height)

        # Agent state
        self.position = (0, 0)
        self.energy = 100  # 0-100 scale; drained per action in execute_action
        self.knowledge = 60  # Add back knowledge attribute
        self.score = 0

        # Planning parameters
        self.perception_range = 5
        self.planning_horizon = 10

        # Initialize spaces
        self.perception_space = PerceptionSpace()
        self.planning_space = PlanningSpace()
        self.reasoning_space = ReasoningSpace()
        self.action_space = ActionSpace()

        # Core components
        self.wisdom = WisdomGraph()
        self.memory = []  # append-only log of perception feature dicts

        # State
        self.target = (10, 10)  # Default target
        self.capability = 70

        # Physical properties
        # NOTE(review): injects config keys straight onto the instance;
        # execute_action relies on max_height and gravity_effect coming
        # from here — confirm against config.PHYSICAL_CONSTRAINTS.
        self.__dict__.update(PHYSICAL_CONSTRAINTS)
        self.environment = ENVIRONMENT_CONFIG.copy()

        # Core properties
        self.autopilot_provider = "Suanfamama"
        self.brain_provider = "Suanfamama"
        self.body_provider = "Suanfamama"
        self.designed_for = ["Suanfamama_beauty_capture_tasks"]

        # Learning parameters
        self.q_table = {}  # maps (state, action) -> Q-value
        self.learning_rate = 0.1    # α in the Q-learning update
        self.discount_factor = 0.9  # γ in the Q-learning update
        self.epsilon = 0.1  # exploration rate for ε-greedy action choice

        # State variables
        self.current_task = "explore 3D space"

        # Other properties (simplified for clarity)
        self.sub_class_1 = "red"
        self.sub_class_2 = "robot"
        self.tasks_waiting_list = []

    def think(self):
        """Run one full cognitive cycle: perceive → plan → decide → execute."""
        percept = self.perception_space.perceive(self.get_observation())
        current_plan = self.planning_space.generate_plan(self.get_state(), self.target)
        chosen_action = self.reasoning_space.decide(percept, current_plan)
        outcome = self.action_space.execute(chosen_action)

        self._update_wisdom(percept, current_plan, chosen_action, outcome)
        return outcome

    def get_observation(self):
        """Return a snapshot of the agent's externally observable state."""
        observed_fields = ("position", "target", "energy", "knowledge", "score")
        return {field: getattr(self, field) for field in observed_fields}

    def get_state(self):
        """Return the agent's full internal state as a dict."""
        state_fields = ("position", "target", "energy", "knowledge", "score", "capability")
        return {field: getattr(self, field) for field in state_fields}

    def _update_wisdom(self, perception, plan, action, result):
        """Update wisdom graph with new information.

        Re-runs each cognitive space to obtain node ids, then links the
        resulting nodes across spaces in the wisdom graph.

        NOTE(review): the ``action`` and ``result`` arguments are unused,
        and the spaces are re-invoked here, duplicating work (and any side
        effects) already performed in ``think`` — confirm whether the
        passed-in values were meant to be used directly instead.
        """
        perception_id = self.perception_space.perceive(perception)
        plan_id = self.planning_space.generate_plan(self.get_state(), self.target)
        reason_id = self.reasoning_space.decide(perception_id, plan)
        action_id = self.action_space.execute(reason_id)
        
        # Connect spaces in wisdom graph
        self.wisdom.add_cross_space_edge(perception_id, plan_id,
            SpaceType.PERCEPTION, SpaceType.PLANNING)
        self.wisdom.add_cross_space_edge(plan_id, reason_id,
            SpaceType.PLANNING, SpaceType.REASONING)
        self.wisdom.add_cross_space_edge(reason_id, action_id,
            SpaceType.REASONING, SpaceType.ACTION)

    def perceive(self, observation: dict):
        """Process observations in perception space.

        Builds a feature snapshot of a hard-coded indoor environment plus
        the agent's own state, prints an analysis, records the features in
        the wisdom graph and in memory, and returns the new node's id.

        Args:
            observation: Incoming observation dict.
                NOTE(review): currently unused — the environment below is
                hard-coded; confirm whether ``observation`` should feed it.

        Returns:
            str: Id of the perception node added to the wisdom graph.
        """
        print("\n🔍 PERCEPTION SPACE")
        
        # Physical environment perception (static demo scene for now)
        print("Environment Analysis:")
        environment = {
            "type": "indoor_room",
            "objects": [
                {"type": "flower_vase", "position": (20, 20, 0), "aesthetic_value": 0.8},
                {"type": "clothing_rack", "position": (80, 80, 0), "style_score": 0.9},
                {"type": "window", "position": (100, 50, 50), "light_level": 0.7}
            ],
            "lighting": "natural_daylight",
            "space_quality": "organized",
            "ambient_temperature": 22  # Celsius
        }
        print(f"- Room type: {environment['type']}")
        print(f"- Objects detected: {len(environment['objects'])}")
        print(f"- Lighting condition: {environment['lighting']}")
        
        # Self-state perception
        print("\nSelf State Analysis:")
        print(f"- Current position: {self.position}")
        print(f"- Energy level: {self.energy}/100")
        print(f"- Knowledge level: {self.knowledge}/100")
        print(f"- Score: {self.score}")
        print(f"- Capability: {self.capability}/100")
        
        # Distance analysis to key objects (Euclidean, via _calculate_distance)
        distances = {obj["type"]: self._calculate_distance(self.position, obj["position"]) 
                    for obj in environment["objects"]}
        print("\nDistance Analysis:")
        for obj_type, dist in distances.items():
            print(f"- Distance to {obj_type}: {dist:.1f} units")
        
        # Create perception node with rich features; timestamp makes the id unique
        node_id = f"perception_{datetime.now().timestamp()}"
        features = {
            "agent_state": {
                "position": self.position,
                "target": self.target,
                "distance_to_target": self._calculate_distance(self.position, self.target),
                "energy": self.energy,
                "knowledge": self.knowledge,
                "score": self.score
            },
            "environment": environment,
            "distances": distances,
            "timestamp": datetime.now().isoformat()
        }
        
        self.wisdom.add_perception_node(
            node_id=node_id,
            features=features,
            level=1,
            importance_score=0.8
        )
        
        # Keep a local copy of every perception for later recall
        self.memory.append(features)
        return node_id
        
    def plan(self, perception_id: str):
        """Planning using HTN as defined in Section 3.2.

        Decomposes a navigation goal into primitive tasks, stores the
        resulting plan in the wisdom graph, and returns the plan node id.

        Args:
            perception_id: Id of the perception node this plan responds to.
                NOTE(review): currently unused within the method body.

        Returns:
            str: Id of the planning node added to the wisdom graph.

        NOTE(review): ``TaskNode`` and ``HierarchicalPlanner`` are not
        imported anywhere in this file, so calling this method raises
        NameError unless they are injected elsewhere — confirm the
        intended import (likely from the planning module).
        """
        print("\n🗺️ PLANNING SPACE")
        
        # Create high-level navigation task
        high_level_task = TaskNode(
            task_id="navigate_to_target",
            task_type="compound",
            description="Navigate to target position"
        )
        
        # Define subtasks based on current state
        current_pos = self.position
        target_pos = self.target
        distance = self._calculate_distance(current_pos, target_pos)
        
        if distance > 10:  # Long distance navigation
            # Break into stages
            high_level_task.add_subtask(TaskNode(
                task_id="plan_route",
                task_type="primitive",
                description="Plan optimal route"
            ))
            high_level_task.add_subtask(TaskNode(
                task_id="check_resources",
                task_type="primitive",
                description="Verify energy requirements"
            ))
            high_level_task.add_subtask(TaskNode(
                task_id="execute_movement",
                task_type="compound",
                description="Move towards target"
            ))
        else:  # Short distance navigation
            high_level_task.add_subtask(TaskNode(
                task_id="direct_movement",
                task_type="primitive",
                description="Move directly to target"
            ))
        
        # Calculate costs for each task
        planner = HierarchicalPlanner()
        primitive_tasks = planner.decompose_task(high_level_task)
        
        total_cost = sum(planner.calculate_cost(task) for task in primitive_tasks)
        
        # Create plan node in wisdom graph; timestamp makes the id unique
        plan_id = f"plan_{datetime.now().timestamp()}"
        self.wisdom.add_planning_node(
            node_id=plan_id,
            task_info={
                "high_level_task": high_level_task,
                "primitive_tasks": primitive_tasks,
                "total_cost": total_cost
            },
            level=1
        )
        
        print(f"\nGenerated plan with {len(primitive_tasks)} primitive tasks")
        print(f"Total plan cost: {total_cost:.2f}")
        
        return plan_id
        
    def reason(self, plan_id: str):
        """
        Probabilistic reasoning using Bayesian network as defined in Section 3.3.

        Evaluates candidate plans attached to ``plan_id`` with Bayesian
        inference combined with a Q-value heuristic, records the winner as
        a reasoning node, and returns that node's id.

        Args:
            plan_id: Id of a planning node in the wisdom graph.

        Returns:
            str: Id of the reasoning node added to the wisdom graph.

        NOTE(review): ``VariableElimination`` (pgmpy) is never imported in
        this file and ``self.reasoning_network`` is never assigned here.
        Also ``plan()`` stores ``task_info`` without an
        ``'available_plans'`` key, so the lookup below would raise —
        confirm how these are wired up elsewhere.
        """
        print("\n🤔 REASONING SPACE")
        
        # Get available plans
        plans = self.wisdom.planning_graph.nodes[plan_id]['task_info']['available_plans']
        
        # Initialize inference engine
        inference = VariableElimination(self.reasoning_network)
        
        evaluations = []
        for plan in plans:
            # Update evidence based on current state
            evidence = {
                'resource_available': 1 if self.energy > plan['energy_required'] else 0,
                'environmental_condition': self._assess_environment(),
                'uncertainty': 0 if len(self.q_table) > 100 else 1  # Based on Q-learning experience
            }
            
            # Perform probabilistic inference
            query_result = inference.query(
                variables=['task_success'],
                evidence=evidence
            )
            
            success_prob = query_result.values[1]  # Probability of success
            
            evaluation = {
                "plan_type": plan["type"],
                "probability": success_prob,
                "evidence": evidence,
                "q_value": self._get_plan_q_value(plan)
            }
            
            evaluations.append(evaluation)
            
            print(f"\nPlan Evaluation: {plan['type']}")
            print(f"- Success Probability: {success_prob:.2f}")
            print(f"- Evidence: {evidence}")
        
        # Select best plan: maximize success probability weighted by Q-value
        best_evaluation = max(evaluations, key=lambda x: x['probability'] * x['q_value'])
        
        # Create reasoning node; timestamp makes the id unique
        reason_id = f"reason_{datetime.now().timestamp()}"
        self.wisdom.add_reasoning_node(
            node_id=reason_id,
            state_info={
                "evaluations": evaluations,
                "selected_plan": best_evaluation
            },
            probability=best_evaluation['probability']
        )
        
        return reason_id

    def _assess_environment(self):
        """Rate the environment as favorable (1) or unfavorable (0).

        Three binary factors are considered; at least two must hold for a
        favorable rating.
        """
        close_to_goal = self._calculate_distance(self.position, self.target) < 50
        well_charged = self.energy > 40
        has_experience = len(self.q_table) > 0
        favorable = close_to_goal + well_charged + has_experience >= 2
        return int(favorable)

    def get_state_key(self, position):
        """Map a continuous 3D position onto a coarse 10-unit grid key.

        Args:
            position: Sequence with at least three coordinates (x, y, z).

        Returns:
            str: Comma-separated grid-cell indices, e.g. ``"2,3,0"``.
        """
        # int(c / 10) (truncation toward zero) reproduces the original
        # discretization exactly, including for negative coordinates.
        gx, gy, gz = (int(position[axis] / 10) for axis in range(3))
        return f"{gx},{gy},{gz}"

    def get_reward(self, state, action, new_state):
        """Reward for a 3D move, shaped by physical constraints.

        Args:
            state: Position (x, y, z) before the action.
            action: Action name string (see ``execute_action``).
            new_state: Position (x, y, z) after the action.

        Returns:
            float: Shaped reward for the transition.
        """
        # Base reward. The previous code called ``super().get_reward`` but
        # Agent's only base class is ``object``, so every call raised
        # AttributeError; start from a neutral base of 0.0 instead.
        reward = 0.0

        # Additional physical rewards/penalties
        if action in ["move_up", "diagonal_up_forward"]:
            reward -= 2.0  # Extra penalty for moving against gravity
        elif action in ["move_down", "diagonal_down_backward"]:
            reward -= 0.5  # Less penalty for moving with gravity

        # Penalty for falling (descended without support at the start state)
        if new_state[2] < state[2] and not self._has_ground_support(state):
            reward -= 5.0  # Significant penalty for falling

        # Reward for efficient movement
        if self._is_efficient_movement(state, action, new_state):
            reward += 1.0

        return reward

    def _is_efficient_movement(self, state, action, new_state):
        """Check if movement is energy-efficient.

        Horizontal movement (no height change) is always considered
        efficient; a height change only counts as efficient when vertical
        movement is actually required to reach the target.
        """
        # Prefer horizontal movement when possible
        if state[2] == new_state[2]:  # Same height level
            return True
        # Vertical movement only when necessary
        return self._is_vertical_movement_necessary(state, self.target)

    def _is_vertical_movement_necessary(self, state, target):
        """Return True when ``target`` demands a different height than ``state``.

        The previous code called this helper without defining it anywhere,
        so any height-changing move raised AttributeError. Targets without
        a z component (2-tuples) never require vertical movement.
        NOTE(review): heuristic reconstruction — confirm intended semantics.
        """
        if len(target) < 3:
            return False
        return state[2] != target[2]

    def get_q_value(self, state, action):
        """Look up Q(s, a), defaulting to 0.0 for unseen pairs."""
        key = (state, action)
        return self.q_table.get(key, 0.0)

    def update_q_value(self, state, action, reward, next_state):
        """
        Q(s,a) ← Q(s,a) + α[R(s,a) + γ max_a' Q(s',a') - Q(s,a)]
        Implementation of Q-learning formula from paper Section 3.4
        """
        old_q = self.get_q_value(state, action)

        # Best achievable value from the successor state: max_a' Q(s',a')
        best_next = max(
            self.get_q_value(next_state, candidate)
            for candidate in self.get_possible_actions(next_state)
        )

        # Temporal-difference update: α is the learning rate, γ the discount.
        td_target = reward + self.discount_factor * best_next
        updated_q = old_q + self.learning_rate * (td_target - old_q)

        self.q_table[(state, action)] = updated_q

        print(f"Q-Learning Update:")
        print(f"- State: {state}")
        print(f"- Action: {action}")
        print(f"- Reward R(s,a): {reward:.1f}")
        print(f"- Q-value update: {old_q:.1f} → {updated_q:.1f}")

    def get_possible_actions(self, state):
        """Get all possible 3D movement actions.

        Thin delegate: forwards ``state`` unchanged to the action space.
        """
        return self.action_space.get_possible_actions(state)

    def choose_action(self, state):
        """Choose an action for ``state`` using an ε-greedy policy.

        With probability ``self.epsilon`` a random action is explored;
        otherwise the action with the highest known Q-value is exploited.
        Relies on the module-level ``import random`` (previously missing,
        which made every call raise NameError).
        """
        if random.random() < self.epsilon:
            # Exploration: choose random action
            return random.choice(self.get_possible_actions(state))
        # Exploitation: pick the argmax of Q(state, a) over known actions.
        actions = self.get_possible_actions(state)
        return max(actions, key=lambda a: self.get_q_value(state, a))

    def act(self, reason_id: str):
        """Execute actions with Q-learning as defined in paper.

        Runs one ε-greedy step: chooses an action, executes it, computes
        the reward, updates the Q-table, mutates ``position``/``energy``,
        and records an action node in the wisdom graph.

        Args:
            reason_id: Id of the reasoning node that led to this action.
                NOTE(review): currently unused within the method body.

        Returns:
            str: Id of the action node added to the wisdom graph.
        """
        print("\n⚡ ACTION SPACE")
        
        # Current state s
        current_state = self.position
        
        # Choose action a using ε-greedy policy
        action = self.choose_action(current_state)
        
        # Execute action and get new state s'
        new_state = self.execute_action(action, current_state)
        
        # Get reward R(s,a)
        reward = self.get_reward(current_state, action, new_state)
        
        # Update Q(s,a) according to paper's formula
        self.update_q_value(current_state, action, reward, new_state)
        
        # Update agent state
        # NOTE(review): execute_action already deducts a per-action energy
        # cost; this flat -1 comes on top of that — confirm intended.
        self.position = new_state
        self.energy -= 1  # Energy cost per action
        
        print(f"\nAction Results:")
        print(f"- Action taken: {action}")
        print(f"- Position: {current_state} → {new_state}")
        print(f"- Reward: {reward:.1f}")
        print(f"- Energy: {self.energy}/100")
        
        # Create action node; timestamp makes the id unique
        action_id = f"action_{datetime.now().timestamp()}"
        self.wisdom.add_action_node(
            node_id=action_id,
            state_info={
                "action": action,
                "reward": reward,
                "q_value": self.get_q_value(current_state, action)
            },
            reward=reward,
            level=1
        )
        return action_id

    def execute_action(self, action, state):
        """Execute an action in full 3D space with physical constraints.

        Args:
            action: Movement name (see the cost table below); unknown
                names leave the state untouched.
            state: Current (x, y, z) position.

        Returns:
            tuple: Resulting (x, y, z) position after gravity and bounds
            clamping. Rejected moves (unknown action, too little energy,
            max height) return ``state`` unchanged without spending energy.
        """
        x, y, z = state
        step = 1.0

        # Energy costs for different movements
        energy_costs = {
            "move_forward": 1.0,      # Base energy cost
            "move_backward": 1.2,     # Slightly more energy to move backward
            "turn_left": 0.8,         # Turning costs less energy
            "turn_right": 0.8,
            "move_up": 2.5,          # Moving up costs significantly more energy (against gravity)
            "move_down": 0.5,        # Moving down costs less (assisted by gravity)
            "diagonal_up_forward": 3.0,  # Combined vertical and horizontal movement
            "diagonal_down_backward": 1.5
        }
        cost = energy_costs.get(action, 1.0)

        # Refuse the action outright when energy is too low.
        if self.energy < cost:
            print(f"⚠️ Insufficient energy for {action}")
            return state

        # Displacement tables: simple moves vs. moves that climb and must
        # respect the maximum reachable height.
        free_moves = {
            "move_forward": (step, step, 0),
            "move_backward": (-step, -step, 0),
            "turn_left": (-step, step, 0),
            "turn_right": (step, -step, 0),
            "move_down": (0, 0, -step),
            "diagonal_down_backward": (-step, -step, -step),
        }
        climbing_moves = {
            "move_up": (0, 0, step),
            "diagonal_up_forward": (step, step, step),
        }

        if action in free_moves:
            dx, dy, dz = free_moves[action]
        elif action in climbing_moves:
            if z + step > self.max_height:
                print("⚠️ Maximum height reached")
                return state
            dx, dy, dz = climbing_moves[action]
        else:
            # Unknown action: no movement, no energy spent.
            return state

        new_state = (x + dx, y + dy, z + dz)

        # Apply gravity - agent falls if no ground support
        if not self._has_ground_support(new_state):
            new_state = (new_state[0], new_state[1], max(0, new_state[2] - self.gravity_effect))
            print("⚠️ Falling due to gravity")

        # Clamp every coordinate into the environment bounds.
        upper = self.environment['size'] - 1
        new_state = tuple(min(max(coord, 0), upper) for coord in new_state)

        # Deduct energy cost
        self.energy -= cost

        return new_state

    def _has_ground_support(self, state):
        """True when the position rests on the floor or on a platform."""
        x, y, z = state
        if z == 0:
            # Ground level always supports the agent.
            return True
        return self._check_platform(x, y, z)

    def _check_platform(self, x, y, z):
        """Check if there's a platform at given coordinates.

        Args:
            x: X coordinate.
            y: Y coordinate.
            z: Height coordinate.

        Returns:
            bool: True when a supporting surface exists at (x, y, z).
        """
        # Placeholder: no platform data is modelled yet, so only ground
        # level counts as support. TODO: query the environment for surfaces.
        return z == 0

    def _calculate_distance(self, pos1, pos2):
        """Euclidean distance between two points (extra axes are ignored
        when the points have different lengths, as ``zip`` truncates)."""
        squared_deltas = [(a - b) ** 2 for a, b in zip(pos1, pos2)]
        return sum(squared_deltas) ** 0.5

    def _get_plan_q_value(self, plan):
        """Heuristic Q-value estimate for a plan, scaled by experience.

        Args:
            plan: Dict with at least a ``"type"`` key.

        Returns:
            float: Base value for the plan type, boosted up to 2x as the
            Q-table grows toward 1000 entries.
        """
        base_values = {
            "safe_path": 0.7,
            "risky_path": 0.4,
            "wait_and_observe": 0.5,
        }
        base = base_values.get(plan["type"], 0.3)

        # More Q-table entries → more experience → up to double the base.
        experience_factor = min(1.0, len(self.q_table) / 1000)
        return base * (1 + experience_factor)

    def generate_plan(self):
        """Generate navigation plan considering resources and constraints.

        Returns:
            dict: ``waypoints``, ``energy_requirements``, ``risk_levels``
            and a ``type`` label.

        NOTE(review): ``_calculate_energy_requirements`` and
        ``_assess_path_risks`` return dicts, but ``_optimize_plan`` indexes
        its arguments with integer waypoint indices (see note there) —
        this call chain raises KeyError as written. Also the ``type``
        labels produced by ``_determine_plan_type`` do not match the keys
        expected by ``_get_plan_q_value`` — confirm the intended plan-type
        vocabulary.
        """
        print("\n🗺️ Generating Plan...")
        
        # 1. Path Planning
        waypoints = self._generate_waypoints()
        
        # 2. Resource Planning
        energy_requirements = self._calculate_energy_requirements(waypoints)
        
        # 3. Risk Assessment
        risk_levels = self._assess_path_risks(waypoints)
        
        # 4. Plan Optimization
        optimized_waypoints = self._optimize_plan(waypoints, energy_requirements, risk_levels)
        
        return {
            "waypoints": optimized_waypoints,
            "energy_requirements": energy_requirements,
            "risk_levels": risk_levels,
            "type": self._determine_plan_type(optimized_waypoints)
        }

    def _generate_waypoints(self):
        """Find a path from ``self.position`` to ``self.target`` with A*.

        Uses ``_heuristic`` (Manhattan) as the estimate, ``_get_neighbors``
        for expansion and ``_movement_cost`` for edge weights.

        Returns:
            list | None: Ordered waypoints from start to goal (inclusive),
            or None when the goal is unreachable.
        """
        start = self.position
        goal = self.target
        
        # Open set holds the frontier; closed set the already-expanded nodes.
        open_set = {start}
        closed_set = set()
        came_from = {}  # child -> parent links for path reconstruction
        
        # g: best known cost from start; f: g + admissible heuristic.
        g_score = {start: 0}
        f_score = {start: self._heuristic(start, goal)}
        
        while open_set:
            current = min(open_set, key=lambda pos: f_score[pos])
            
            if current == goal:
                return self._reconstruct_path(came_from, current)
            
            open_set.remove(current)
            closed_set.add(current)
            
            for neighbor in self._get_neighbors(current):
                if neighbor in closed_set:
                    continue
                
                tentative_g = g_score[current] + self._movement_cost(current, neighbor)
                
                if neighbor not in open_set:
                    open_set.add(neighbor)
                elif tentative_g >= g_score.get(neighbor, float('inf')):
                    continue  # existing route to neighbor is at least as good
                
                came_from[neighbor] = current
                g_score[neighbor] = tentative_g
                f_score[neighbor] = g_score[neighbor] + self._heuristic(neighbor, goal)
        
        # Frontier exhausted without reaching the goal.
        return None

    def _reconstruct_path(self, came_from, current):
        """Walk ``came_from`` links back to the start and return the path.

        Previously referenced by ``_generate_waypoints`` without being
        defined anywhere, so every successful search raised AttributeError.
        """
        path = [current]
        while current in came_from:
            current = came_from[current]
            path.append(current)
        path.reverse()
        return path

    def _optimize_plan(self, waypoints, energy_requirements, risk_levels):
        """Optimize plan considering multiple objectives.

        Inserts recharge stops where energy would run out and detours
        around high-risk waypoints, then returns the adjusted path.

        NOTE(review): this method indexes ``energy_requirements[i]`` and
        ``risk_levels[i]`` with integer waypoint indices, but
        ``generate_plan`` passes the dicts produced by
        ``_calculate_energy_requirements`` / ``_assess_path_risks`` —
        integer indexing into those dicts raises KeyError. Also
        ``_find_nearest_recharge`` and ``_find_safe_alternative`` are not
        defined in this file. Confirm the intended per-waypoint sequences
        and helper implementations.
        """
        optimized_waypoints = []
        current_energy = self.energy
        
        for i, waypoint in enumerate(waypoints):
            # Check energy requirements
            if energy_requirements[i] > current_energy:
                # Find nearest recharge point
                recharge_point = self._find_nearest_recharge(waypoint)
                if recharge_point:
                    optimized_waypoints.append(recharge_point)
                    current_energy = 100  # Recharge
            
            # Check risk level
            if risk_levels[i] > 0.7:  # High risk
                # Find alternative safer path
                safe_point = self._find_safe_alternative(waypoint)
                if safe_point:
                    optimized_waypoints.append(safe_point)
            
            optimized_waypoints.append(waypoint)
            current_energy -= energy_requirements[i]
        
        return optimized_waypoints

    def _determine_plan_type(self, waypoints):
        """Classify a waypoint path by its shape and resource needs.

        Args:
            waypoints: Ordered list of path positions.

        Returns:
            str: One of ``"multi_stage"``, ``"energy_efficient"``,
            ``"direct"`` or ``"hybrid"``.
        """
        if len(waypoints) > 10:
            return "multi_stage"
        elif self._has_recharge_points(waypoints):
            return "energy_efficient"
        elif self._is_direct_path(waypoints):
            return "direct"
        else:
            return "hybrid"

    def _has_recharge_points(self, waypoints):
        """Whether the path passes through any recharge point.

        Previously referenced without being defined (AttributeError for
        every path of <= 10 waypoints). No recharge-point data is visible
        in the environment model yet, so this conservatively reports
        False. NOTE(review): wire up to real recharge locations.
        """
        return False

    def _is_direct_path(self, waypoints):
        """Whether the path is a shortest (Manhattan) route.

        Previously referenced without being defined. A path is direct when
        its step count equals the Manhattan distance between its endpoints.
        """
        if len(waypoints) < 2:
            return True
        return len(waypoints) - 1 == self._heuristic(waypoints[0], waypoints[-1])

    def _heuristic(self, start, goal):
        """Manhattan-distance estimate between two points.

        Args:
            start: Starting position; only indices 0 and 1 are used.
            goal: Goal position; only indices 0 and 1 are used.

        Returns:
            Estimated distance between the points.
        """
        dx = start[0] - goal[0]
        dy = start[1] - goal[1]
        return abs(dx) + abs(dy)

    def _get_neighbors(self, position):
        """Return in-bounds, unblocked 4-neighbours of ``position``.

        Args:
            position: A ``Position``-like object (with ``.x``/``.y``) or a
                list/tuple with at least two coordinates.

        Returns:
            list: Valid neighbouring (x, y) tuples, in right/left/down/up
            order.

        Raises:
            ValueError: On a too-short sequence or unsupported type.
        """
        # Normalise the position into plain x/y coordinates.
        if hasattr(position, 'x') and hasattr(position, 'y'):
            x, y = position.x, position.y
        elif isinstance(position, (list, tuple)):
            if len(position) >= 2:
                x, y = position[0], position[1]
            else:
                raise ValueError(f"Invalid position format: {position}")
        else:
            raise ValueError(f"Unsupported position type: {type(position)}")

        # Candidate moves in fixed order: right, left, down, up.
        candidates = ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1))

        width, height = self.world_size
        return [
            (nx, ny)
            for nx, ny in candidates
            if 0 <= nx < width and 0 <= ny < height and not self._is_blocked((nx, ny))
        ]

    def _is_blocked(self, position):
        """Check if a position is blocked by obstacles.

        Args:
            position: Position to check (x,y) tuple

        Returns:
            bool: True if position is blocked, False otherwise
        """
        # Placeholder: no obstacle map is modelled yet, so every cell is
        # treated as traversable. TODO: consult environment obstacle data.
        return False

    def _movement_cost(self, current, next_pos):
        """Cost of stepping from ``current`` to ``next_pos``.

        Combines a fixed base cost with terrain and energy components.

        Args:
            current: Current position.
            next_pos: Next position.

        Returns:
            float: Movement cost between positions.
        """
        base_cost = 1.0
        terrain_cost = self._get_terrain_cost(next_pos)
        energy_cost = self._calculate_energy_cost(current, next_pos)
        return base_cost + terrain_cost + energy_cost

    def _get_terrain_cost(self, position):
        """Get terrain-based movement cost for position.

        Args:
            position: Position to check

        Returns:
            float: Terrain cost modifier (0.0 = normal, >0 = harder)
        """
        # Placeholder: terrain data is not modelled yet; 0.0 means normal
        # terrain everywhere. TODO: derive from an environment terrain map.
        return 0.0

    def _calculate_energy_cost(self, current, next_pos):
        """Energy needed to move between two positions.

        Args:
            current: Current position.
            next_pos: Next position.

        Returns:
            float: 0.1 energy per unit of Euclidean distance.
        """
        return self._calculate_distance(current, next_pos) * 0.1

    def _calculate_energy_requirements(self, waypoints):
        """Estimate the energy needed to follow ``waypoints``.

        Args:
            waypoints: List of positions forming the path.

        Returns:
            dict: ``total_cost`` (summed energy), ``breakdown``
            (per-segment cost records) and ``risk_factors`` (segments
            that are expensive relative to the agent's current energy).
        """
        details = {
            "total_cost": 0,
            "breakdown": [],
            "risk_factors": []
        }
        if not waypoints:
            return details

        # Walk consecutive waypoint pairs.
        for segment_start, segment_end in zip(waypoints, waypoints[1:]):
            base_cost = self._movement_cost(segment_start, segment_end)
            terrain_factor = self._get_terrain_cost(segment_end)
            segment_total = base_cost * (1 + terrain_factor)

            details["breakdown"].append({
                "from": segment_start,
                "to": segment_end,
                "cost": segment_total,
                "terrain_factor": terrain_factor
            })
            details["total_cost"] += segment_total

            # A single segment eating >30% of current energy is risky.
            if segment_total > self.energy * 0.3:
                details["risk_factors"].append({
                    "segment": (segment_start, segment_end),
                    "cost": segment_total,
                    "risk_level": "high"
                })

        return details

    def _assess_path_risks(self, waypoints):
        """Assess risks along a planned path.

        Args:
            waypoints: List of positions forming the path.

        Returns:
            dict: ``overall_risk`` (none/low/medium/high), ``risk_points``
            (risky locations) and ``mitigation`` (suggested countermeasures).
        """
        if not waypoints:
            return {"overall_risk": "none", "risk_points": [], "mitigation": []}

        assessment = {
            "overall_risk": "low",
            "risk_points": [],
            "mitigation": []
        }

        # Accumulate risk factors over consecutive waypoint pairs.
        total_energy_drain = 0
        hard_terrain_segments = 0

        for leg_start, leg_end in zip(waypoints, waypoints[1:]):
            total_energy_drain += self._movement_cost(leg_start, leg_end)

            terrain_severity = self._get_terrain_cost(leg_end)
            if terrain_severity > 0.5:  # notably difficult terrain
                hard_terrain_segments += 1
                assessment["risk_points"].append({
                    "position": leg_end,
                    "type": "difficult_terrain",
                    "severity": terrain_severity
                })
                assessment["mitigation"].append({
                    "type": "terrain",
                    "location": leg_end,
                    "suggestion": "Consider alternative route or rest before attempt"
                })

        # Energy-based overall risk: >80% of reserves is high, >50% medium.
        if total_energy_drain > self.energy * 0.8:
            assessment["overall_risk"] = "high"
            assessment["mitigation"].append({
                "type": "energy",
                "suggestion": "Plan rest stops or energy restoration points"
            })
        elif total_energy_drain > self.energy * 0.5:
            assessment["overall_risk"] = "medium"
            assessment["mitigation"].append({
                "type": "energy",
                "suggestion": "Monitor energy levels carefully"
            })

        # Too much difficult terrain (>30% of waypoints) forces high risk.
        if hard_terrain_segments > len(waypoints) * 0.3:
            assessment["overall_risk"] = "high"
            assessment["mitigation"].append({
                "type": "path",
                "suggestion": "Consider replanning path with easier terrain"
            })

        return assessment

    def display_status(self):
        """Print a human-readable summary of the agent's current state."""
        status_lines = (
            "\nAgent Status:",
            f"- Current position: {self.position}",
            f"- Energy level: {self.energy}/100",
            f"- Knowledge level: {self.knowledge}/100",
            f"- Score: {self.score}",
            f"- Capability: {self.capability}/100",
        )
        for line in status_lines:
            print(line)

class FashionAgent(Agent):
    """Specialized agent for fashion tasks (Future extension).

    NOTE(review): ``designed_for`` is re-assigned to the same value the
    Agent base class already sets; kept for explicitness.
    """
    def __init__(self):
        """Initialize with base Agent defaults plus the fashion task tag."""
        super().__init__()
        self.designed_for = ["Suanfamama_beauty_capture_tasks"]

class EducationAgent(Agent):
    """Specialized agent for education tasks (Future extension).

    Currently behaves exactly like the base Agent.
    """
    pass