import unittest
from env import World, Position, Object3D
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
import random
import networkx as nx
from pgmpy.inference import VariableElimination
from agent import Agent
import pandas as pd
import os

# Configure NumExpr threading before tests
def configure_numexpr():
    """Cap NumExpr's worker-thread pool at 4 and report the configuration.

    Both mechanisms are applied deliberately: the ``NUMEXPR_MAX_THREADS``
    environment variable only takes effect if set *before* numexpr is first
    imported, while ``set_num_threads`` works at any time after import.
    """
    # Method 1: Environment variable (must precede the first numexpr import)
    os.environ["NUMEXPR_MAX_THREADS"] = "4"  # Set to desired number

    # Method 2: Direct configuration
    import numexpr
    numexpr.set_num_threads(4)  # Set to desired number

    print("\nNumExpr Configuration:")
    # detect_number_of_cores() reports the machine's CPU count, not the
    # 4-thread cap configured above — label it accurately.
    print(f"- Detected CPU cores: {numexpr.detect_number_of_cores()}")
    print(f"- Active threads: {numexpr.get_num_threads()}")

class TestBase(unittest.TestCase):
    """Base test class with setup.

    All concrete test classes in this module inherit from this so that
    NumExpr threading is configured exactly once per test class.
    """
    @classmethod
    def setUpClass(cls):
        """Run once before all tests in the class."""
        # Delegates to the module-level helper; safe to call repeatedly.
        configure_numexpr()

class TestEnvInit(TestBase):
    """Test environment initialization and basic properties"""

    def setUp(self):
        """Create a fresh World plus a baseline configuration dict."""
        print("\n=== Testing Environment Initialization ===")
        self.size = (10, 10, 5)  # (x, y, z) extent of the 3D space
        self.world = World()

        # Baseline physical-environment configuration used by the tests.
        self.env_config = dict(
            size=self.size,
            gravity=True,
            terrain={},
            platforms=[],
            obstacles=[],
            lighting='natural',
            temperature=22,
            humidity=0.5,
        )

    def test_env_creation(self):
        """Test basic environment creation"""
        print("\nTesting environment creation...")

        # Dimensions must match the configured extent.
        self.assertEqual(
            self.world.size, self.size,
            "Environment should have correct dimensions"
        )

        # Default physical properties.
        self.assertTrue(
            self.world.has_gravity,
            "Environment should have gravity by default"
        )
        self.assertIsNotNone(
            self.world.terrain,
            "Environment should have terrain"
        )

    def test_terrain_generation(self):
        """Test terrain generation"""
        print("\nTesting terrain generation...")

        heightmap = self.world.generate_terrain()

        # Every height must lie in the normalized [0, 1] range.
        self.assertTrue(
            all(0 <= height <= 1 for height in heightmap.values()),
            "Terrain heights should be normalized"
        )

        # One entry per (x, y) cell of the ground plane.
        expected_cells = self.size[0] * self.size[1]
        self.assertEqual(
            len(heightmap),
            expected_cells,
            "Terrain should cover entire ground plane"
        )

    def test_platform_placement(self):
        """Test platform placement in environment"""
        print("\nTesting platform placement...")

        # A 2x2 platform raised one unit above the ground.
        self.world.add_platform(
            {'x': 2, 'y': 2, 'z': 1, 'width': 2, 'length': 2}
        )

        # Standing on the platform is legal; floating above it is not.
        self.assertTrue(
            self.world.is_valid_position((2, 2, 1)),
            "Should be valid position on platform"
        )
        self.assertFalse(
            self.world.is_valid_position((2, 2, 2)),
            "Should be invalid position above platform"
        )

    def test_collision_detection(self):
        """Test environment collision detection"""
        print("\nTesting collision detection...")

        # Unit-cube obstacle in the middle of the arena.
        self.world.add_obstacle({'position': (5, 5, 1), 'size': (1, 1, 1)})

        self.assertTrue(
            self.world.check_collision((5, 5, 1)),
            "Should detect collision with obstacle"
        )
        self.assertFalse(
            self.world.check_collision((0, 0, 0)),
            "Should not detect collision in empty space"
        )

    def test_environmental_conditions(self):
        """Test environmental conditions"""
        print("\nTesting environmental conditions...")

        # Natural light -> full visibility.
        self.world.set_lighting('natural')
        self.assertEqual(
            self.world.visibility, 1.0,
            "Natural lighting should have full visibility"
        )

        # Dim light -> reduced visibility.
        self.world.set_lighting('dim')
        self.assertLess(
            self.world.visibility, 1.0,
            "Dim lighting should reduce visibility"
        )

    def test_physical_constraints(self):
        """Test physical constraints in environment"""
        print("\nTesting physical constraints...")

        # Gravity must pull a free-floating position downward (z shrinks).
        dropped = self.world.apply_gravity((5, 5, 5))
        self.assertTrue(
            dropped[2] < 5,
            "Gravity should reduce height"
        )

        # Out-of-range coordinates are clamped back inside the arena.
        clamped = self.world.constrain_position((11, 11, 11))
        self.assertTrue(
            all(0 <= coord < limit for coord, limit in zip(clamped, self.size)),
            "Position should be constrained within boundaries"
        )

class TestAgentInit(TestBase):
    """Test agent initialization and basic capabilities"""

    def setUp(self):
        """Create a fresh Agent for every test."""
        print("\n=== Testing Agent Initialization ===")
        self.agent = Agent()

    def test_core_properties(self):
        """Test core agent properties are properly initialized"""
        print("\nTesting core properties...")

        # Provider information baked into the agent at construction time.
        self.assertEqual(self.agent.autopilot_provider, "Suanfamama")
        self.assertEqual(self.agent.brain_provider, "Suanfamama")
        self.assertEqual(self.agent.body_provider, "Suanfamama")
        self.assertIn("Suanfamama_beauty_capture_tasks", self.agent.designed_for)

    def test_state_variables(self):
        """Test initial state variables"""
        print("\nTesting state variables...")
        # Previously `pass # TODO`, which reported as a passing test.
        # Skip instead so the missing coverage is visible in the test report.
        self.skipTest("state variable checks not implemented yet")

    def test_learning_parameters(self):
        """Test Q-learning parameters"""
        print("\nTesting learning parameters...")
        # Previously `pass # TODO`, which reported as a passing test.
        # Skip instead so the missing coverage is visible in the test report.
        self.skipTest("learning parameter checks not implemented yet")

    def test_bayesian_network(self):
        """Test Bayesian network initialization"""
        print("\nTesting Bayesian network...")

        # Edges the agent's decision network is expected to contain.
        expected_edges = [
            ('task_success', 'action_choice'),
            ('resource_available', 'task_success'),
            ('environmental_condition', 'task_success'),
            ('q_value', 'action_choice'),
            ('uncertainty', 'task_success')
        ]

        # The network lives under the agent's reasoning_space.
        for edge in expected_edges:
            self.assertIn(edge, self.agent.reasoning_space.network.edges())

        # Every node must carry a conditional probability distribution.
        for node in self.agent.reasoning_space.network.nodes():
            self.assertIsNotNone(
                self.agent.reasoning_space.network.get_cpds(node),
                f"Node {node} should have a CPD"
            )

        # pgmpy's consistency check: CPDs sum to 1, structure is valid, etc.
        self.assertTrue(
            self.agent.reasoning_space.network.check_model(),
            "Bayesian network should be properly initialized"
        )

    def test_cognitive_components(self):
        """Test cognitive components initialization"""
        print("\nTesting cognitive components...")

        # Wisdom graph must exist; episodic memory must start empty.
        self.assertIsNotNone(self.agent.wisdom)
        self.assertEqual(len(self.agent.memory), 0, "Memory should start empty")

    def test_action_space(self):
        """Test action space initialization"""
        print("\nTesting action space...")

        # Full 3D movement vocabulary (see per-action axis deltas).
        expected_actions = {
            "move_forward",    # +x, +y
            "move_backward",   # -x, -y
            "turn_left",       # -x, +y
            "turn_right",      # +x, -y
            "move_up",         # +z
            "move_down",       # -z
            "diagonal_up_forward",     # +x, +y, +z
            "diagonal_down_backward"   # -x, -y, -z
        }

        available_actions = set(self.agent.get_possible_actions(self.agent.position))
        self.assertEqual(available_actions, expected_actions,
                        "Agent should have all basic movement actions")

    def test_method_availability(self):
        """Test that all required methods are available"""
        print("\nTesting method availability...")

        # One method per stage of the agent's cognitive cycle.
        required_methods = [
            'think',          # Cognitive cycle
            'perceive',       # Perception space
            'plan',          # Planning space
            'reason',        # Reasoning space
            'act',           # Action space
            'execute_action', # Action execution
            'get_reward',    # Reward calculation
            'update_q_value' # Learning update
        ]

        for method in required_methods:
            self.assertTrue(hasattr(self.agent, method),
                          f"Agent should have {method} method")
            self.assertTrue(callable(getattr(self.agent, method)),
                          f"{method} should be callable")

class TestAgentAction(TestBase):
    """Test agent's action space capabilities using actual Agent class"""

    def setUp(self):
        """Build a randomized 3D environment and attach it to a fresh Agent."""
        print("\n=== Initializing Agent Action Test Environment ===")
        self.agent = Agent()

        # Set environment size first — platform/terrain generation reads it.
        size = 10
        self.agent.environment = {
            'size': size,
            'gravity': True,
            'terrain': {},
            'platforms': []
        }

        min_distance = 5  # Minimum distance between start and goal

        # Sample start/goal pairs until they are far enough apart.
        while True:
            self.start_pos = (
                random.randint(0, size-1),
                random.randint(0, size-1),
                random.randint(0, 2)  # Lower z range for start
            )

            self.goal = (
                random.randint(0, size-1),
                random.randint(0, size-1),
                random.randint(0, 4)  # Higher z range for goal
            )

            # Euclidean distance between start and goal.
            distance = sum((a - b) ** 2 for a, b in zip(self.start_pos, self.goal)) ** 0.5

            # Ensure minimum distance and different positions.
            if distance >= min_distance and self.start_pos != self.goal:
                break

        # Configure agent
        self.agent.position = self.start_pos
        self.agent.target = self.goal

        # Now generate platforms and terrain
        self.agent.environment.update({
            'start': self.start_pos,
            'goal': self.goal,
            'platforms': self._generate_random_platforms(),
            'terrain': self._generate_terrain()
        })

        # Initialize Q-learning parameters
        self.agent.learning_rate = 0.2
        self.agent.discount_factor = 0.9
        self.agent.epsilon = 0.1

        print(f"Agent starting at: {self.start_pos}")
        print(f"Goal position: {self.goal}")
        print(f"Distance to goal: {distance:.2f}")

    def _generate_random_platforms(self):
        """Generate random platforms for more interesting 3D navigation"""
        platforms = []
        num_platforms = random.randint(2, 5)
        size = self.agent.environment['size']

        # Each platform is a raised rectangle described by origin + extent.
        for _ in range(num_platforms):
            platform = {
                'x': random.randint(0, size-2),
                'y': random.randint(0, size-2),
                'z': random.randint(1, 3),
                'width': random.randint(2, 4),
                'length': random.randint(2, 4)
            }
            platforms.append(platform)

        print(f"Generated {len(platforms)} platforms")
        return platforms

    def _generate_terrain(self):
        """Generate terrain height map keyed by (x, y) grid cell."""
        size = self.agent.environment['size']
        terrain = {}

        # Uniform-random height per ground cell, normalized to [0, 1].
        for x in range(size):
            for y in range(size):
                height = random.uniform(0, 1)
                terrain[(x, y)] = height

        return terrain

    def test_agent_navigation(self):
        """Test actual agent's navigation capabilities"""
        print("\n=== Testing Agent Navigation ===")
        print(f"Terrain features:")
        print(f"- Platforms: {len(self.agent.environment['platforms'])}")
        print(f"- Gravity enabled: {self.agent.environment['gravity']}")

        # The original body only printed and asserted nothing, so the test
        # could never fail.  Verify setUp actually built the environment.
        self.assertTrue(self.agent.environment['platforms'],
                        "setUp should generate at least one platform")
        self.assertEqual(len(self.agent.environment['terrain']),
                         self.agent.environment['size'] ** 2,
                         "terrain should cover every (x, y) cell")
        self.assertNotEqual(self.agent.position, self.agent.target,
                            "start and goal positions must differ")

    def _plot_results(self, positions, rewards, q_values):
        """Enhanced visualization of agent's navigation and learning progress"""
        # Create figure with 3D capability
        fig = plt.figure(figsize=(15, 10))
        gs = fig.add_gridspec(2, 2)

        # 1. Path visualization (2D and 3D)
        positions = np.array(positions)

        # 2D trajectory
        ax1 = fig.add_subplot(gs[0, 0])
        self._plot_2d_path(ax1, positions)

        # 3D trajectory
        ax2 = fig.add_subplot(gs[0, 1], projection='3d')
        self._plot_3d_path(ax2, positions)

        # Learning metrics span the full bottom row
        ax3 = fig.add_subplot(gs[1, :])
        self._plot_learning_metrics(ax3, rewards, q_values)

        plt.tight_layout()
        plt.show()

    def _plot_2d_path(self, ax, positions):
        """Plot 2D navigation path with heatmap and platforms"""
        # Project 3D positions to 2D (x-y plane)
        x = positions[:, 0]  # x coordinates
        y = positions[:, 1]  # y coordinates

        # Create heatmap of visited positions
        x_bins = np.linspace(0, self.agent.environment['size'], 20)
        y_bins = np.linspace(0, self.agent.environment['size'], 20)
        heatmap, _, _ = np.histogram2d(x, y, bins=[x_bins, y_bins])

        # Plot heatmap (transposed so rows map to the y axis)
        ax.imshow(heatmap.T, origin='lower', cmap='viridis', alpha=0.3,
                  extent=[0, self.agent.environment['size'],
                         0, self.agent.environment['size']])

        # Plot path
        ax.plot(x, y, 'b-', alpha=0.6, label='Path')

        # Add start and goal markers
        ax.scatter(self.start_pos[0], self.start_pos[1],
                  color='g', s=100, label='Start')
        ax.scatter(self.goal[0], self.goal[1],
                  color='r', s=100, label='Goal')

        # Add direction arrows
        step = len(positions) // 10  # Show 10 arrows along path
        if step > 0:
            for i in range(0, len(positions)-step, step):
                dx = x[i+step] - x[i]
                dy = y[i+step] - y[i]
                ax.arrow(x[i], y[i], dx*0.5, dy*0.5,
                        head_width=0.3, head_length=0.5,
                        fc='blue', ec='blue', alpha=0.5)

        # Plot platforms
        for platform in self.agent.environment['platforms']:
            rect = plt.Rectangle(
                (platform['x'], platform['y']),
                platform['width'], platform['length'],
                color='gray', alpha=0.3, label='Platform'
            )
            ax.add_patch(rect)

        # Plot terrain contours.  Use distinct loop names (gx, gy) — the
        # original rebound the x/y path arrays extracted above.
        terrain_x = []
        terrain_y = []
        terrain_z = []
        for (gx, gy), height in self.agent.environment['terrain'].items():
            terrain_x.append(gx)
            terrain_y.append(gy)
            terrain_z.append(height)

        ax.tricontour(terrain_x, terrain_y, terrain_z,
                     alpha=0.2, cmap='terrain')

        ax.set_title('2D Navigation Path')
        ax.set_xlabel('X Position')
        ax.set_ylabel('Y Position')
        ax.legend()
        ax.grid(True, alpha=0.3)

    def _plot_3d_path(self, ax, positions):
        """Plot 3D navigation path with time dimension"""
        # Extract coordinates
        x = positions[:, 0]  # x coordinates
        y = positions[:, 1]  # y coordinates
        z = positions[:, 2]  # z coordinates
        time = np.arange(len(positions))

        # Plot 3D trajectory with color gradient based on time
        points = ax.scatter(x, y, z, c=time, cmap='viridis',
                           alpha=0.6, label='Path')
        plt.colorbar(points, ax=ax, label='Time Steps')

        # Plot path line
        ax.plot3D(x, y, z, 'gray', alpha=0.3)

        # Mark start and end points
        ax.scatter(self.start_pos[0], self.start_pos[1], self.start_pos[2],
                  color='g', s=100, label='Start')
        ax.scatter(self.goal[0], self.goal[1], self.goal[2],
                  color='r', s=100, label='Goal')

        # Set labels and title
        ax.set_title('3D Navigation Path')
        ax.set_xlabel('X Position')
        ax.set_ylabel('Y Position')
        ax.set_zlabel('Z Position')

        # Set axis limits
        size = self.agent.environment['size']
        ax.set_xlim(0, size)
        ax.set_ylim(0, size)
        ax.set_zlim(0, size)

        ax.legend()

        # Add grid
        ax.grid(True, alpha=0.3)

        # Set optimal viewing angle
        ax.view_init(elev=30, azim=45)

    def _plot_learning_metrics(self, ax, rewards, q_values):
        """Plot learning metrics with confidence intervals"""
        steps = np.arange(len(rewards))

        # Plot raw rewards
        ax.plot(steps, rewards, 'g.', alpha=0.2, label='Raw Rewards')

        # Adjust window size based on data length
        window = min(50, len(rewards) // 4)  # Window size should be smaller than data length
        if window > 1:  # Only smooth if we have enough data
            # Plot smoothed rewards with confidence interval
            smoothed_rewards = self._smooth_data(rewards, window)
            confidence = self._calculate_confidence(rewards, window)

            ax.plot(steps, smoothed_rewards, 'g-', label='Smoothed Rewards')
            ax.fill_between(steps,
                           smoothed_rewards - confidence,
                           smoothed_rewards + confidence,
                           color='g', alpha=0.1)

        # Plot Q-values on secondary axis
        ax2 = ax.twinx()
        ax2.plot(steps, q_values, 'b.', alpha=0.2, label='Q-values')

        # Smooth Q-values if we have enough data
        if window > 1:
            smoothed_q = self._smooth_data(q_values, window)
            q_confidence = self._calculate_confidence(q_values, window)

            ax2.plot(steps, smoothed_q, 'b-', label='Smoothed Q-values')
            ax2.fill_between(steps,
                            smoothed_q - q_confidence,
                            smoothed_q + q_confidence,
                            color='b', alpha=0.1)

        ax.set_xlabel('Steps')
        ax.set_ylabel('Rewards', color='g')
        ax2.set_ylabel('Q-values', color='b')

        # Combine legends from both axes into one box
        lines1, labels1 = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines1 + lines2, labels1 + labels2, loc='upper left')

    def _smooth_data(self, data, window):
        """Smooth data using a moving average (same-length output)."""
        if window < 2:
            return np.array(data)
        weights = np.ones(window) / window
        return np.convolve(data, weights, mode='same')

    def _calculate_confidence(self, data, window):
        """Calculate a rolling 95% confidence interval half-width."""
        if window < 2:
            return np.zeros_like(data)
        rolling_std = pd.Series(data).rolling(window, min_periods=1).std()
        return rolling_std.fillna(0) * 1.96  # 95% confidence interval

class TestAgentActionBasic(TestBase):
    """Tabular Q-learning smoke test on a randomly generated 2D maze."""

    def generate_random_maze(self, size=10, wall_density=0.3):
        """Generate random maze with guaranteed path.

        Cells are 0 (free) or 1 (wall).  After random generation, a path
        from start to goal is carved by a greedy walk with backtracking.
        Requires ``self.start_pos`` / ``self.goal`` to be set (see setUp).
        """
        maze = np.random.choice([0, 1], size=(size, size), p=[1-wall_density, wall_density])

        # Ensure start and end positions are clear
        maze[self.start_pos] = 0
        maze[self.goal] = 0

        # Carve a path from start to goal
        current = self.start_pos
        path = [current]
        while current != self.goal:
            x, y = current
            possible_moves = []
            for dx, dy in [(0,1), (0,-1), (1,0), (-1,0)]:
                new_x, new_y = x + dx, y + dy
                if (0 <= new_x < size and 0 <= new_y < size and
                    (new_x, new_y) not in path):
                    possible_moves.append((new_x, new_y))

            if not possible_moves:
                # Backtrack if stuck
                path.pop()
                current = path[-1]
            else:
                # Move greedily toward the goal (Manhattan distance) among
                # unvisited neighbours — not random, despite appearances.
                next_pos = min(possible_moves,
                             key=lambda p: abs(p[0]-self.goal[0]) + abs(p[1]-self.goal[1]))
                current = next_pos
                path.append(current)
                maze[current] = 0

        return maze

    def setUp(self):
        """Pick random start/goal, build the maze, and create the agent."""
        print("\n=== Initializing Maze Environment ===")
        self.world = World()

        # Randomly select distinct start and goal positions
        size = 10
        while True:
            self.start_pos = (random.randint(0, size-1), random.randint(0, size-1))
            self.goal = (random.randint(0, size-1), random.randint(0, size-1))
            if self.start_pos != self.goal:
                break

        print(f"Start position: {self.start_pos}")
        print(f"Goal position: {self.goal}")

        # Generate random maze (needs start/goal set above)
        self.maze = self.generate_random_maze()

        # Initialize Q-learning parameters
        self.q_table = {}
        self.learning_rate = 0.1
        self.discount_factor = 0.9
        self.epsilon = 0.1

        print("\n=== Q-Learning Parameters ===")
        print(f"Learning rate: {self.learning_rate}")
        print(f"Discount factor: {self.discount_factor}")
        print(f"Initial exploration rate: {self.epsilon}")

        # Create agent; world coordinates are 10x the maze grid coordinates.
        self.agent = Object3D(
            position=Position(self.start_pos[0]*10, self.start_pos[1]*10, 0),
            size=(5, 5, 5),
            velocity=(1, 1, 0),
            type="agent",
            properties={"mass": 1.0}
        )

    def test_maze_navigation(self):
        """Test agent navigation with visualization"""
        print("\n=== Starting Navigation Test ===")

        max_steps = 1000
        stuck_threshold = 50
        reset_count = 0
        max_resets = 5

        plt.figure(figsize=(12, 8))
        ax1 = plt.subplot2grid((2, 1), (0, 0))
        ax2 = plt.subplot2grid((2, 1), (1, 0))

        # Track all attempts separately
        all_attempts = []
        current_attempt = [(self.start_pos[0], self.start_pos[1])]  # Start with initial position
        rewards = []
        q_values = []
        recent_positions = []

        print("\n=== Episode Progress ===")
        steps = 0

        while steps < max_steps:
            # Agent's world position mapped back to maze grid coordinates.
            current_state = (int(self.agent.position.x/10), int(self.agent.position.y/10))
            recent_positions.append(current_state)

            # Check if agent is stuck (too few unique recent positions)
            if len(recent_positions) > stuck_threshold:
                recent_positions = recent_positions[-stuck_threshold:]
                unique_positions = len(set(recent_positions))

                if unique_positions < 5:
                    if reset_count < max_resets:
                        print(f"\n⚠️ Agent stuck! Resetting position (Reset {reset_count + 1}/{max_resets})")
                        # Save current attempt before reset
                        all_attempts.append(current_attempt)
                        # Reset agent and path
                        self.agent.position = Position(self.start_pos[0]*10, self.start_pos[1]*10, 0)
                        current_attempt = [(self.start_pos[0], self.start_pos[1])]  # New attempt
                        recent_positions = []
                        reset_count += 1
                        # Boost exploration after a reset
                        self.epsilon = min(0.9, self.epsilon + 0.2)
                        continue
                    else:
                        print("\n❌ Max resets reached - ending navigation")
                        all_attempts.append(current_attempt)
                        break

            # Gradual exploration decay, floored at 0.1
            self.epsilon = max(0.1, self.epsilon * 0.9995)

            # Epsilon-greedy action selection
            action = self.get_best_action(current_state) if random.random() > self.epsilon else random.choice(['up', 'down', 'left', 'right'])
            new_pos = self.execute_action(action, current_state)
            reward = self.get_reward(new_pos)

            # Standard Q-learning update
            old_q = self.get_q_value(current_state, action)
            next_max_q = max([self.get_q_value(new_pos, a) for a in ['up', 'down', 'left', 'right']])
            new_q = old_q + self.learning_rate * (reward + self.discount_factor * next_max_q - old_q)
            self.q_table[(current_state, action)] = new_q

            if self.is_valid_move(new_pos):
                self.agent.position = Position(new_pos[0]*10, new_pos[1]*10, 0)
                current_attempt.append(new_pos)  # Add to current path
                rewards.append(reward)
                q_values.append(new_q)

                if steps % 50 == 0:
                    print(f"\nStep {steps}:")
                    print(f"Position: {new_pos}")
                    print(f"Distance to goal: {abs(new_pos[0]-self.goal[0]) + abs(new_pos[1]-self.goal[1])}")
                    print(f"Exploration rate: {self.epsilon:.2f}")
                    print(f"Q-value: {new_q:.2f}")

                if new_pos == self.goal:
                    print(f"\n🎉 Success! Goal reached in {steps} steps!")
                    print(f"Final exploration rate: {self.epsilon:.2f}")
                    all_attempts.append(current_attempt)
                    break

            steps += 1

        # Plot maze and all attempts
        ax1.imshow(self.maze, cmap='binary')
        ax1.set_title('Maze Layout and Agent Attempts\nBlack: Walls, White: Path')

        # Plot each attempt with different alpha values
        colors = plt.cm.rainbow(np.linspace(0, 1, len(all_attempts)))
        for i, attempt in enumerate(all_attempts):
            attempt = np.array(attempt)
            alpha = (i + 1) / len(all_attempts)  # Later attempts are more visible
            ax1.plot(attempt[:,1], attempt[:,0], '-',
                    color=colors[i], alpha=alpha,
                    label=f'Attempt {i+1}')

        # Mark start and goal (note row/col swap for imshow coordinates)
        ax1.scatter(self.start_pos[1], self.start_pos[0], color='g', s=100, label='Start')
        ax1.scatter(self.goal[1], self.goal[0], color='r', s=100, label='Goal')
        ax1.legend(bbox_to_anchor=(1.05, 1), loc='upper left')

        # Plot learning progress
        ax2.plot(rewards, label='Rewards', color='g', alpha=0.3)

        # Plot smoothed Q-values
        window = 20
        if len(q_values) > window:
            smoothed_q = np.convolve(q_values, np.ones(window)/window, mode='valid')
            ax2.plot(smoothed_q, label='Q-values (smoothed)', color='b')

        ax2.set_xlabel('Steps')
        ax2.set_ylabel('Value')
        ax2.set_title('Learning Progress')
        ax2.legend()

        plt.tight_layout()
        plt.show()

        # Print final statistics.  The original printed the last *reward*
        # under the "Final position" label; report the actual position,
        # and guard the aggregates against empty reward/Q lists (possible
        # when every sampled move was invalid).
        print("\n=== Final Results ===")
        print(f"Steps taken: {len(rewards)}")
        final_state = (int(self.agent.position.x/10), int(self.agent.position.y/10))
        print(f"Final position: {final_state}")
        if rewards:
            print(f"Average reward: {np.mean(rewards):.2f}")
        print(f"Final Q-table size: {len(self.q_table)}")
        if q_values:
            print(f"Q-value range: {min(q_values):.2f} to {max(q_values):.2f}")

    def choose_action(self, state):
        """Simple uniform-random action selection (currently unused)."""
        return random.choice(['up', 'down', 'left', 'right'])

    def execute_action(self, action, state):
        """Execute action and return new position, clamped to the 10x10 grid."""
        x, y = state
        if action == 'up': x -= 1
        elif action == 'down': x += 1
        elif action == 'left': y -= 1
        elif action == 'right': y += 1
        return (max(0, min(9, x)), max(0, min(9, y)))

    def is_valid_move(self, pos):
        """Check if move is valid (not wall)"""
        return self.maze[pos[0], pos[1]] == 0

    def get_reward(self, pos):
        """
        Calculate reward for position with better incentives:
        - Large positive reward for reaching goal
        - Medium negative reward for hitting walls
        - Small negative reward for each step (encourages shorter paths)
        - Small positive reward for getting closer to goal
        """
        if not self.is_valid_move(pos):
            return -10  # Wall collision penalty

        if pos == self.goal:
            return 100  # Goal reward

        # Manhattan-distance shaping relative to the agent's current cell
        current_distance = abs(pos[0] - self.goal[0]) + abs(pos[1] - self.goal[1])
        previous_distance = abs(self.agent.position.x/10 - self.goal[0]) + abs(self.agent.position.y/10 - self.goal[1])

        if current_distance < previous_distance:
            return 2  # Getting closer to goal

        return -0.1  # Small step penalty to encourage efficiency

    def get_q_value(self, state, action):
        """Get Q-value from table, initialize if not present"""
        return self.q_table.get((state, action), 0.0)  # Start with 0 instead of random

    def get_best_action(self, state):
        """Get the best action based on the Q-table (ties broken by order)."""
        best_value = float('-inf')
        best_action = None
        for action in ['up', 'down', 'left', 'right']:
            q_value = self.get_q_value(state, action)
            if q_value > best_value:
                best_value = q_value
                best_action = action
        return best_action

class TestAgentReasoning(TestBase):
    """Tests for the agent's Bayesian reasoning over candidate plans.

    Each scenario varies the agent's energy level (resources) and its
    Q-table size (a proxy for experience) and checks that the combined
    probability-times-resource-weight score selects the expected plan.
    """
    def setUp(self):
        """Create a fresh Agent and the fixed menu of candidate plans."""
        print("\n=== Initializing Reasoning Test Environment ===")
        self.agent = Agent()
        
        # Set up some test scenarios: three mutually exclusive plans with
        # different cost/risk trade-offs.
        self.test_plans = [
            {
                "type": "safe_path",
                "description": "Take safer but longer path",
                "energy_required": 30,
                "energy_gain": 0,
                "success_probability": 0.8
            },
            {
                "type": "risky_path",
                "description": "Take shorter but riskier path",
                "energy_required": 50,
                "energy_gain": 20,
                "success_probability": 0.6
            },
            {
                "type": "wait_and_observe",
                "description": "Wait to gather more information",
                "energy_required": 10,
                "energy_gain": 5,
                "success_probability": 0.9
            }
        ]

    def test_bayesian_reasoning(self):
        """Test agent's Bayesian reasoning capabilities"""
        print("\n=== Testing Bayesian Reasoning ===")
        
        # Create test scenarios with different conditions; each pairs a
        # resource/experience profile with the plan we expect it to favor.
        scenarios = [
            {
                "name": "High Resource Scenario",
                "energy": 90,
                "q_table_size": 150,  # Experienced agent
                "expected_best": "risky_path"  # Should take risks when resources high
            },
            {
                "name": "Low Resource Scenario",
                "energy": 20,
                "q_table_size": 50,   # Less experienced
                "expected_best": "wait_and_observe"  # Should be conservative
            },
            {
                "name": "Balanced Scenario",
                "energy": 60,
                "q_table_size": 100,
                "expected_best": "safe_path"  # Should take balanced approach
            }
        ]
        
        # Update CPDs to better reflect resource-based decision making
        self._update_cpds_for_resource_based_decisions()
        
        for scenario in scenarios:
            print(f"\nTesting {scenario['name']}:")
            print(f"- Energy Level: {scenario['energy']}")
            print(f"- Experience (Q-table size): {scenario['q_table_size']}")
            
            # Set up agent state: energy directly, experience via a dummy
            # Q-table of the requested size (contents are irrelevant here).
            self.agent.energy = scenario['energy']
            self.agent.q_table = {f"state_{j}": j for j in range(scenario['q_table_size'])}
            
            # Calculate plan probabilities with resource consideration
            plan_probabilities = self._evaluate_plans_with_resources(scenario)
            
            # Verify reasoning aligns with expectations: the winning plan is
            # the one maximizing success probability weighted by resources.
            best_plan = max(plan_probabilities, key=lambda x: x['probability'] * x['resource_weight'])
            print(f"\nSelected Plan: {best_plan['plan']}")
            print(f"Expected Plan: {scenario['expected_best']}")
            
            self.assertEqual(
                best_plan['plan'],
                scenario['expected_best'],
                f"Agent should choose {scenario['expected_best']} in {scenario['name']}"
            )

    def _update_cpds_for_resource_based_decisions(self):
        """Update CPDs to better reflect resource-based decision making"""
        # Placeholder: CPD tuning is not yet implemented, so the scenarios
        # above currently rely on the resource weights alone.
        pass

    def _evaluate_plans_with_resources(self, scenario):
        """Evaluate plans considering resource levels.

        Returns a list of dicts with keys 'plan', 'probability',
        'resource_weight', and 'evidence', one entry per test plan.
        """
        plan_probabilities = []
        
        for plan in self.test_plans:
            # Base evidence, encoded as binary variables for the Bayesian
            # network on the agent's reasoning space.
            evidence = {
                'resource_available': 1 if scenario['energy'] > plan['energy_required'] else 0,
                'environmental_condition': 1 if scenario['energy'] > 60 else 0,
                'uncertainty': 0 if scenario['q_table_size'] > 100 else 1
            }
            
            # Calculate success probability via exact inference on the
            # agent's network.
            query_result = VariableElimination(self.agent.reasoning_space.network).query(
                variables=['task_success'],
                evidence=evidence
            )
            
            # NOTE(review): index 1 assumes 'task_success' has state order
            # [failure, success] in the network's CPDs — confirm against
            # the Agent's network definition.
            success_prob = query_result.values[1]
            
            # Calculate resource-based weight
            resource_weight = self._calculate_resource_weight(plan, scenario['energy'])
            
            plan_probabilities.append({
                'plan': plan['type'],
                'probability': success_prob,
                'resource_weight': resource_weight,
                'evidence': evidence
            })
            
            print(f"\nPlan: {plan['type']}")
            print(f"- Success Probability: {success_prob:.2f}")
            print(f"- Resource Weight: {resource_weight:.2f}")
            print(f"- Evidence: {evidence}")
        
        return plan_probabilities

    def _calculate_resource_weight(self, plan, energy):
        """Calculate weight based on resource levels.

        Boosts risky plans when energy is plentiful and conservative plans
        when it is scarce; the safe path is always weighted neutrally.
        """
        if plan['type'] == 'risky_path':
            return 1.5 if energy > 80 else 0.5  # Favor risky when energy high
        elif plan['type'] == 'wait_and_observe':
            return 1.5 if energy < 30 else 0.5  # Favor waiting when energy low
        else:  # safe_path
            return 1.0  # Balanced option

class TestAgentPlanning(TestBase):
    """Test agent's planning space capabilities (Section 3.2)"""
    def setUp(self):
        """Create a fresh Agent and the navigation/resource scenarios."""
        print("\n=== Initializing Planning Test Environment ===")
        self.agent = Agent()
        
        # Test planning scenarios: one obstacle-rich navigation task and
        # one energy-constrained task with recharge stations en route.
        self.planning_scenarios = [
            {
                "name": "Complex Navigation",
                "start": (0, 0, 0),
                "goal": (8, 8, 4),
                "obstacles": [
                    {"position": (4, 4, 2), "size": (2, 2, 2)},
                    {"position": (6, 2, 1), "size": (1, 3, 2)}
                ],
                "energy": 100,
                "expected_plan_type": "multi_stage"
            },
            {
                "name": "Resource Limited",
                "start": (0, 0, 0),
                "goal": (9, 9, 0),
                "energy": 30,
                "recharge_stations": [(3, 3, 0), (6, 6, 0)],
                "expected_plan_type": "energy_efficient"
            }
        ]

    def test_plan_generation(self):
        """Test plan generation and evaluation"""
        print("\n=== Testing Plan Generation ===")
        
        for scenario in self.planning_scenarios:
            print(f"\nTesting {scenario['name']}:")
            
            # Configure environment
            self.agent.position = scenario["start"]
            self.agent.target = scenario["goal"]
            self.agent.energy = scenario["energy"]
            
            # Generate plan
            plan = self.agent.generate_plan()
            
            # Verify plan properties
            self.assertIsNotNone(plan, "Should generate a plan")
            self.assertIn("waypoints", plan, "Plan should include waypoints")
            self.assertIn("energy_requirements", plan, "Plan should consider energy")
            
            # Test plan feasibility
            self.assertTrue(
                self._is_plan_feasible(plan, scenario),
                "Generated plan should be feasible"
            )

    def _is_plan_feasible(self, plan, scenario):
        """Sanity-check a generated plan against its scenario.

        Fix: this helper was called by test_plan_generation but never
        defined, so the test always crashed with AttributeError. The
        checks below are reconstructed from the scenario fields and the
        keys asserted above ("waypoints", "energy_requirements");
        NOTE(review): confirm against Agent.generate_plan()'s actual
        plan schema.

        A plan is considered feasible when:
        - it has at least one waypoint,
        - its waypoints connect the scenario's start to its goal,
        - its energy requirement fits the scenario's budget (or recharge
          stations are available along the way).
        """
        waypoints = plan.get("waypoints") or []
        if not waypoints:
            return False
        # The route must begin at the start position and end at the goal.
        if tuple(waypoints[0]) != tuple(scenario["start"]):
            return False
        if tuple(waypoints[-1]) != tuple(scenario["goal"]):
            return False
        # Total energy demand; accept either a scalar or a per-leg list.
        required = plan.get("energy_requirements", 0)
        if isinstance(required, (list, tuple)):
            required = sum(required)
        # With recharge stations the agent can top up mid-route, so only
        # the connectivity checks above apply.
        if scenario.get("recharge_stations"):
            return True
        return required <= scenario["energy"]

class TestAgentPerception(TestBase):
    """Test agent's perception space capabilities (Section 3.1)"""
    def setUp(self):
        """Build a fresh Agent and the perception scenarios under test."""
        print("\n=== Initializing Perception Test Environment ===")
        self.agent = Agent()

        # Scenario matrix: noise/visibility conditions paired with the
        # accuracy the perception pipeline is expected to reach.
        clear_env = {
            "name": "Clear Environment",
            "noise_level": "low",
            "visibility": "high",
            "expected_accuracy": 0.9,
        }
        noisy_env = {
            "name": "Noisy Environment",
            "noise_level": "high",
            "visibility": "low",
            "expected_accuracy": 0.6,
        }
        self.perception_scenarios = [clear_env, noisy_env]

    def test_environment_perception(self):
        """Test environmental perception and state estimation"""
        # TODO: Implement perception tests based on Section 3.1
        pass

def run_specific_test(test_class):
    """Run tests for a specific cognitive component"""
    suite = unittest.TestLoader().loadTestsFromTestCase(test_class)
    unittest.TextTestRunner(verbosity=2).run(suite)

if __name__ == '__main__':
    import sys

    # CLI: optional category ('env'/'agent') and optional component name
    # (or 'all'), mapped onto the TestCase classes defined in this file.
    test_categories = {
        'env': {  # Environment tests
            'init': TestEnvInit,
            'all': [TestEnvInit],  # Add more env test classes here
        },
        'agent': {  # Agent tests
            'init': TestAgentInit,
            'action': TestAgentAction,
            'action_basic': TestAgentActionBasic,
            'reasoning': TestAgentReasoning,
            'planning': TestAgentPlanning,
            'perception': TestAgentPerception,
            'all': [
                TestAgentInit,
                TestAgentAction,
                TestAgentActionBasic,
                TestAgentReasoning,
                TestAgentPlanning,
                TestAgentPerception,
            ],
        },
    }

    cli_args = sys.argv[1:]
    if not cli_args:
        # No arguments: run every test class from every category.
        print("\n=== Running All Tests ===")
        for category, tests in test_categories.items():
            print(f"\n=== {category.title()} Tests ===")
            for test_class in tests['all']:
                run_specific_test(test_class)
    else:
        category = cli_args[0].lower()  # env or agent
        component = cli_args[1].lower() if len(cli_args) > 1 else 'all'

        if category not in test_categories:
            print("Available test categories:")
            print(list(test_categories.keys()))
        elif component == 'all':
            # Run all tests in the requested category.
            print(f"\n=== Running All {category.title()} Tests ===")
            for test_class in test_categories[category]['all']:
                run_specific_test(test_class)
        elif component in test_categories[category]:
            # Run one specific component test.
            print(f"\n=== Running {category.title()} {component.title()} Test ===")
            run_specific_test(test_categories[category][component])
        else:
            print(f"Unknown {category} test component. Available tests:")
            print(list(test_categories[category].keys()))