from env import World
from agent import Agent
from viz import CognitiveVisualizer
import matplotlib.pyplot as plt

def main():
    """Demo 1: Basic Agent abilities for Perception, Planning, Reasoning
    and Action in a simple environment with rewards.

    Builds a ``World`` and an ``Agent``, assigns a navigate-and-analyze
    task, runs up to three cognitive cycles (stopping early if the agent's
    energy drops below 20), updates a live plot each cycle, and finally
    prints research metrics about graph pruning and run performance.
    """
    print("\n=== Initializing the Game ===")

    # Create 3D environment
    print("\n1. Creating ONE Environment")
    world = World()
    world.create_basic_environment()

    # Initialize agent with high-level task
    print("\n2. Creating ONE AI Agent")
    agent = Agent()
    visualizer = CognitiveVisualizer()

    agent.current_task = {
        "type": "navigate_and_analyze",
        "objective": "Reach target while analyzing environment",
        "target_position": (100, 100, 100),
        "requirements": {
            "min_energy": 20,
            "min_knowledge": 50
        }
    }

    # Snapshot the starting state so the research metrics below are
    # computed against the agent's ACTUAL initial values, not the
    # previously hard-coded assumptions (80 energy / 60 knowledge /
    # origin position).
    initial_energy = agent.energy
    initial_knowledge = agent.knowledge
    initial_position = agent.position

    # Initial state
    print("\n3. Initial Agent State:")
    print(f"Energy: {agent.energy}/100")
    print(f"Knowledge: {agent.knowledge}/100")
    print(f"Position: {agent.position}")
    print(f"Task: {agent.current_task['type']}")

    # Run cognitive cycles
    print("\n4. Starting Cognitive Cycles")
    cycles_completed = 0
    for cycle in range(3):
        print(f"\n=== Cognitive Cycle {cycle + 1} ===")

        # Run one cognitive cycle; assumes think() returns a metrics dict
        # (TODO confirm against agent.Agent.think's actual contract).
        cycle_metrics = agent.think()
        cycles_completed = cycle + 1

        # Record metrics for visualization. Default confidence to 0.0 so a
        # think() implementation that omits the key does not abort the demo
        # with a KeyError.
        visualizer.record_cycle(
            position=agent.position,
            energy=agent.energy,
            knowledge=agent.knowledge,
            confidence=cycle_metrics.get('confidence', 0.0)
        )

        # Update the live plot
        visualizer.update_plot()

        # Show current state
        print(f"\n📊 Agent Status:")
        print(f"Position: {agent.position}")
        print(f"Energy: {agent.energy}")
        print(f"Knowledge: {agent.knowledge}")

        # Stop early once the energy budget is nearly exhausted.
        if agent.energy < 20:
            print("\n⚠️ Agent needs rest - stopping cycles")
            break

    # Keep plot window open until closed
    plt.ioff()
    plt.show()

    # Final statistics
    print("\n=== Final Agent Status ===")
    print(f"Cycles completed: {cycles_completed}")
    print(f"Final position: {agent.position}")
    print(f"Final energy: {agent.energy}/100")
    print(f"Final knowledge: {agent.knowledge}/100")

    # Collect research metrics: node counts per cognitive graph after
    # pruning, plus resource-usage and movement statistics for the run.
    node_counts = {
        "perception_nodes": len(agent.wisdom.perception_graph.nodes),
        "planning_nodes": len(agent.wisdom.planning_graph.nodes),
        "reasoning_nodes": len(agent.wisdom.reasoning_graph.nodes),
        "action_nodes": len(agent.wisdom.action_graph.nodes),
    }
    node_counts["total_nodes"] = sum(node_counts.values())

    # Measure displacement from where the agent actually started.
    distance = agent._calculate_distance(initial_position, agent.position)

    metrics = {
        "pruning_effectiveness": node_counts,
        "performance_impact": {
            # Percentage of starting energy retained; guard against a
            # zero initial energy to avoid ZeroDivisionError.
            "energy_efficiency": (agent.energy / initial_energy) * 100 if initial_energy else 0.0,
            "knowledge_gain": agent.knowledge - initial_knowledge,
            "distance_covered": distance,
            "average_step_size": distance / cycles_completed if cycles_completed else 0.0
        }
    }

    # Output research results
    print("\n=== Research Metrics ===")
    print("Pruning Effectiveness:")
    for key, value in metrics["pruning_effectiveness"].items():
        print(f"- {key}: {value}")
    print("\nPerformance Impact:")
    for key, value in metrics["performance_impact"].items():
        if key == "energy_efficiency":
            print(f"- {key}: {value:.1f}%")
        elif key in ["distance_covered", "average_step_size"]:
            print(f"- {key}: {value:.2f} units")
        else:
            print(f"- {key}: {value}")

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()