"""
Dung Beetle Optimizer (DBO) Algorithm Implementation

This implementation is based on the paper:
"Dung beetle optimizer: a new meta-heuristic algorithm for global optimization"

The algorithm mimics the behavior of dung beetles in nature:
1. Random flight (exploration)
2. Rolling behavior (exploitation)
3. Communication behavior (information sharing)
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Callable, List, Tuple


class DungBeetleOptimizer:
    """Population-based metaheuristic minimizer modeling dung-beetle behavior.

    Each iteration applies three moves to every beetle:
    1. Random flight (exploration): a uniform perturbation in [-1, 1]^d.
    2. Rolling behavior (exploitation): a pull towards the global best.
    3. Communication behavior: a probabilistic pull towards a fitter peer.
    """

    def __init__(
        self,
        objective_function: Callable,
        dimensions: int,
        population_size: int = 30,
        max_iterations: int = 100,
        lower_bound: float = -100.0,
        upper_bound: float = 100.0,
        rolling_factor: float = 0.5,
        communication_probability: float = 0.3,
    ):
        """
        Initialize the Dung Beetle Optimizer.

        Args:
            objective_function: The function to be minimized; receives a 1-D
                numpy array of length ``dimensions`` and returns a scalar.
            dimensions: Number of dimensions of the problem
            population_size: Number of beetles in the population
            max_iterations: Maximum number of iterations
            lower_bound: Lower bound of the search space (same for every dim)
            upper_bound: Upper bound of the search space (same for every dim)
            rolling_factor: Factor controlling the rolling behavior (exploitation)
            communication_probability: Probability of communication between beetles

        Raises:
            ValueError: If population/iteration counts are not positive or the
                bounds are not strictly ordered.
        """
        # BUG FIX: validate inputs up front instead of failing obscurely later
        # (e.g. inverted bounds would silently produce a degenerate population).
        if population_size < 1:
            raise ValueError("population_size must be a positive integer")
        if max_iterations < 1:
            raise ValueError("max_iterations must be a positive integer")
        if lower_bound >= upper_bound:
            raise ValueError("lower_bound must be strictly less than upper_bound")

        self.objective_function = objective_function
        self.dimensions = dimensions
        self.population_size = population_size
        self.max_iterations = max_iterations
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.rolling_factor = rolling_factor
        self.communication_probability = communication_probability

        # Uniformly scatter the initial population over the search box.
        self.population = np.random.uniform(
            low=lower_bound,
            high=upper_bound,
            size=(population_size, dimensions)
        )

        # Fitness of every beetle at its current position.
        self.fitness = np.array(
            [self.objective_function(beetle) for beetle in self.population]
        )

        # Global best seen so far; position is copied so later in-place moves
        # of the population cannot alias it.
        self.best_index = np.argmin(self.fitness)
        self.best_position = self.population[self.best_index].copy()
        # BUG FIX: store as a plain float so the value matches the declared
        # ``float`` return of optimize() instead of a numpy scalar.
        self.best_fitness = float(self.fitness[self.best_index])

        # Best fitness per iteration, filled in by optimize().
        self.convergence_curve = np.zeros(max_iterations)

    def _clip_to_bounds(self, beetle_index: int) -> None:
        """Project a beetle back into the search box after a move (in place)."""
        np.clip(
            self.population[beetle_index],
            self.lower_bound,
            self.upper_bound,
            out=self.population[beetle_index],
        )

    def random_flight(self, beetle_index: int) -> None:
        """
        Random flight behavior (exploration).

        Perturbs one beetle by a uniform random step in [-1, 1]^d and clips
        it back into the search box.

        Args:
            beetle_index: Index of the beetle in the population
        """
        random_step = np.random.uniform(
            low=-1.0,
            high=1.0,
            size=self.dimensions
        )
        self.population[beetle_index] += random_step
        self._clip_to_bounds(beetle_index)

    def rolling_behavior(self, beetle_index: int) -> None:
        """
        Rolling behavior (exploitation).

        Moves one beetle a random fraction of the way towards the global
        best position, scaled by ``rolling_factor``.

        Args:
            beetle_index: Index of the beetle in the population
        """
        pull = self.best_position - self.population[beetle_index]
        self.population[beetle_index] += self.rolling_factor * np.random.random() * pull
        self._clip_to_bounds(beetle_index)

    def communication_behavior(self, beetle_index: int) -> None:
        """
        Communication behavior (information sharing).

        With probability ``communication_probability``, picks a random peer;
        if the peer's recorded fitness is better, moves towards it.

        NOTE(review): the peer's stored fitness may be stale if the peer has
        already moved this iteration without re-evaluation; this mirrors the
        original algorithm and is deliberately kept as-is.

        Args:
            beetle_index: Index of the beetle in the population
        """
        if np.random.random() >= self.communication_probability:
            return

        other_beetle_index = np.random.randint(0, self.population_size)
        # A beetle cannot communicate with itself.
        if other_beetle_index == beetle_index:
            return

        if self.fitness[other_beetle_index] < self.fitness[beetle_index]:
            pull = self.population[other_beetle_index] - self.population[beetle_index]
            self.population[beetle_index] += (
                self.rolling_factor * np.random.random() * pull
            )
            self._clip_to_bounds(beetle_index)

    def optimize(self) -> Tuple[np.ndarray, float, np.ndarray]:
        """
        Run the optimization process.

        Returns:
            Tuple containing:
            - Best position found (1-D array of length ``dimensions``)
            - Best fitness value
            - Convergence curve (best fitness after each iteration, as an
              ``np.ndarray`` of length ``max_iterations``)
        """
        # BUG FIX (annotation only): the convergence curve is an np.ndarray,
        # not a List[float] as previously declared.
        for iteration in range(self.max_iterations):
            for i in range(self.population_size):
                # Apply the three behavioral moves in order.
                self.random_flight(i)
                self.rolling_behavior(i)
                self.communication_behavior(i)

                # Re-evaluate after the moves, then update the incumbent best.
                self.fitness[i] = self.objective_function(self.population[i])
                if self.fitness[i] < self.best_fitness:
                    self.best_position = self.population[i].copy()
                    self.best_fitness = float(self.fitness[i])

            # Record the best-so-far for convergence analysis.
            self.convergence_curve[iteration] = self.best_fitness

            # Progress report on the first iteration and every tenth one.
            if (iteration + 1) % 10 == 0 or iteration == 0:
                print(f"Iteration {iteration + 1}/{self.max_iterations}, Best fitness: {self.best_fitness}")

        return self.best_position, self.best_fitness, self.convergence_curve


# Example usage with test functions
def sphere_function(x):
    """Sphere benchmark: f(x) = sum(x_i^2); global minimum 0 at the origin."""
    return np.sum(np.square(x))


def rosenbrock_function(x):
    """Rosenbrock benchmark; global minimum 0 at x = (1, ..., 1)."""
    head = x[:-1]
    tail = x[1:]
    return np.sum(100.0 * (tail - head**2) ** 2 + (head - 1) ** 2)


def rastrigin_function(x):
    """Rastrigin benchmark; highly multimodal, global minimum 0 at the origin."""
    dims = len(x)
    oscillation = 10 * np.cos(2 * np.pi * x)
    return 10 * dims + np.sum(x * x - oscillation)


def griewank_function(x):
    """Griewank benchmark; global minimum 0 at the origin."""
    indices = np.arange(1, len(x) + 1)
    quadratic = np.sum(np.square(x)) / 4000
    cosine = np.prod(np.cos(x / np.sqrt(indices)))
    return 1 + quadratic - cosine


def ackley_function(x):
    """Ackley benchmark; global minimum 0 at the origin."""
    n = len(x)
    rms = np.sqrt(np.sum(x**2) / n)
    exp_term = -20 * np.exp(-0.2 * rms)
    cos_term = -np.exp(np.sum(np.cos(2 * np.pi * x)) / n)
    return exp_term + cos_term + 20 + np.exp(1)


if __name__ == "__main__":
    # Benchmark suite: maps a display name to its objective function.
    test_functions = {
        "Sphere": sphere_function,
        "Rosenbrock": rosenbrock_function,
        "Rastrigin": rastrigin_function,
        "Griewank": griewank_function,
        "Ackley": ackley_function
    }

    dimensions = 30

    # Run optimizer for each test function
    for name, func in test_functions.items():
        print(f"\nOptimizing {name} function:")

        # Rastrigin is conventionally evaluated on [-5.12, 5.12];
        # the other benchmarks use [-30, 30] here.
        optimizer = DungBeetleOptimizer(
            objective_function=func,
            dimensions=dimensions,
            population_size=50,
            max_iterations=200,
            lower_bound=-5.12 if name == "Rastrigin" else -30,
            upper_bound=5.12 if name == "Rastrigin" else 30
        )

        best_position, best_fitness, convergence = optimizer.optimize()

        print(f"Best fitness: {best_fitness}")

        # Plot and save the convergence curve for this benchmark.
        plt.figure(figsize=(10, 6))
        plt.plot(convergence)
        plt.title(f"Convergence Curve - {name} Function")
        plt.xlabel("Iteration")
        plt.ylabel("Fitness Value")
        plt.grid(True)
        plt.yscale("log")
        plt.savefig(f"{name}_convergence.png")
        # BUG FIX: close the figure after saving; without this, one figure
        # per benchmark stays open and accumulates memory (matplotlib warns
        # once more than 20 figures are alive).
        plt.close()