"""
Improved Dung Beetle Optimizer (IDBO) Algorithm Implementation

This implementation extends the basic DBO algorithm with improvements from the paper:
"A combined improved dung beetle optimization and extreme learning machine framework for precise SOC estimation"

Key improvements:
1. Circle chaotic mapping for initialization
2. Golden sine strategy for rolling behavior
3. Lévy flight for communication behavior
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Callable, List, Tuple
import scipy.special


class ImprovedDungBeetleOptimizer:
    """
    Improved Dung Beetle Optimizer (IDBO) for unconstrained minimization.

    Improvements over the basic DBO (following the referenced paper):
      1. Circle chaotic mapping for a more diverse initial population.
      2. Golden sine strategy in the rolling (exploitation) phase.
      3. Lévy-flight steps in the communication phase.
      4. Elite opposition-based learning applied to the worst individual.

    The search space is the hyper-rectangle [lower_bound, upper_bound]^dimensions
    and all moves are clipped back into it.
    """

    def __init__(
        self,
        objective_function: Callable,
        dimensions: int,
        population_size: int = 30,
        max_iterations: int = 100,
        lower_bound: float = -100.0,
        upper_bound: float = 100.0,
        initial_rolling_factor: float = 0.5,
        communication_probability: float = 0.3,
        elite_group_size: int = 5,
        opposition_probability: float = 0.1,
        levy_alpha: float = 1.5,
        golden_ratio: float = 1.618,
    ):
        """
        Initialize the Improved Dung Beetle Optimizer

        Args:
            objective_function: The function to be minimized
            dimensions: Number of dimensions of the problem
            population_size: Number of beetles in the population
            max_iterations: Maximum iterations for optimization
            lower_bound: Lower bound of the search space
            upper_bound: Upper bound of the search space
            initial_rolling_factor: Initial factor for rolling behavior
            communication_probability: Probability of communication
            elite_group_size: Size of the elite group (clamped to the
                population size so the elite arrays stay consistent)
            opposition_probability: Probability of opposition-based learning
            levy_alpha: Parameter for Lévy flight (stability parameter)
            golden_ratio: Golden ratio for sine strategy (≈1.618)
        """
        self.objective_function = objective_function
        self.dimensions = dimensions
        self.population_size = population_size
        self.max_iterations = max_iterations
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.rolling_factor = initial_rolling_factor
        self.communication_probability = communication_probability
        # Robustness fix: an elite group larger than the population would make
        # this attribute disagree with the actual (argsort-sliced) elite arrays.
        self.elite_group_size = min(elite_group_size, population_size)
        self.opposition_probability = opposition_probability
        self.levy_alpha = levy_alpha
        self.golden_ratio = golden_ratio

        # Performance fix: Mantegna's sigma depends only on levy_alpha, so
        # compute it once here instead of on every _levy_flight() call
        # (the original recomputed two gamma functions per call).
        self._levy_sigma = (
            scipy.special.gamma(1 + self.levy_alpha)
            * np.sin(np.pi * self.levy_alpha / 2)
            / (
                scipy.special.gamma((1 + self.levy_alpha) / 2)
                * self.levy_alpha
                * 2 ** ((self.levy_alpha - 1) / 2)
            )
        ) ** (1 / self.levy_alpha)

        # Initialize population with circle chaotic mapping
        self.population = self._circle_chaotic_initialization()

        # Evaluate initial population
        self.fitness = np.array(
            [self.objective_function(beetle) for beetle in self.population]
        )

        # Keep track of the best solution found so far; best_fitness always
        # equals objective_function(best_position).
        self.best_index = np.argmin(self.fitness)
        self.best_position = self.population[self.best_index].copy()
        self.best_fitness = self.fitness[self.best_index]

        # Per-iteration best fitness, for convergence analysis
        self.convergence_curve = np.zeros(max_iterations)

        # Elite group: indices into the current population plus copies of the
        # corresponding positions/fitness values.
        self.elite_indices = np.argsort(self.fitness)[:self.elite_group_size]
        self.elite_positions = self.population[self.elite_indices].copy()
        self.elite_fitness = self.fitness[self.elite_indices].copy()

    def _circle_chaotic_initialization(self) -> np.ndarray:
        """
        Initialize population using circle chaotic mapping for better diversity
        as described in the paper (equation 5)

        Returns:
            Initialized population of shape (population_size, dimensions),
            mapped into [lower_bound, upper_bound]
        """
        population = np.zeros((self.population_size, self.dimensions))

        # Random chaotic seed per beetle
        z = np.random.random(self.population_size)

        for i in range(self.population_size):
            # Generate chaotic sequence using the circle map; the recurrence is
            # inherently sequential, so it cannot be vectorized across j.
            x = np.zeros(self.dimensions)
            z_i = z[i]

            for j in range(self.dimensions):
                # Circle chaotic map equation: z_{n+1} = (z_n + Ω - K/(2π) * sin(2πz_n)) mod 1
                # Using simplified parameters: Ω = 0.5, K = 1
                z_i = (z_i + 0.5 - (1 / (2 * np.pi)) * np.sin(2 * np.pi * z_i)) % 1
                x[j] = z_i

            # Map the chaotic values in [0, 1) onto the search space
            population[i] = self.lower_bound + (self.upper_bound - self.lower_bound) * x

        return population

    def _levy_flight(self) -> np.ndarray:
        """
        Generate step using Lévy flight distribution (Mantegna's algorithm)
        as described in the paper (equations 7-9)

        Returns:
            Step vector of length `dimensions` following a Lévy distribution
        """
        # sigma is precomputed in __init__ (constant for a fixed levy_alpha)
        u = np.random.normal(0, self._levy_sigma, self.dimensions)
        v = np.random.normal(0, 1, self.dimensions)
        step = u / (np.abs(v) ** (1 / self.levy_alpha))

        return step

    def _update_rolling_factor(self, iteration: int) -> None:
        """
        Update rolling factor adaptively based on iteration progress

        Args:
            iteration: Current iteration number (0-based)
        """
        # Linear decrease from 0.9 to 0.1 over the run: favors exploration
        # early and exploitation late.
        self.rolling_factor = 0.9 - 0.8 * (iteration / self.max_iterations)

    def random_flight(self, beetle_index: int) -> None:
        """
        Random flight behavior (exploration): perturb the beetle with a
        uniform random step whose size shrinks as rolling_factor decays.

        Args:
            beetle_index: Index of the beetle in the population
        """
        random_step = np.random.uniform(
            low=-1.0,
            high=1.0,
            size=self.dimensions
        ) * (1 - self.rolling_factor)

        self.population[beetle_index] += random_step

        # Keep the beetle inside the search space
        self.population[beetle_index] = np.clip(
            self.population[beetle_index],
            self.lower_bound,
            self.upper_bound
        )

    def rolling_behavior(self, beetle_index: int, iteration: int) -> None:
        """
        Rolling behavior with golden sine strategy (exploitation)
        as described in the paper (equation 6): move toward the global best,
        scaled by the golden ratio and a sine of the progress ratio.

        Args:
            beetle_index: Index of the beetle in the population
            iteration: Current iteration number
        """
        # Progress ratio in [0, 1)
        t = iteration / self.max_iterations

        # Golden sine scaling grows with progress, intensifying exploitation
        sine_factor = np.sin(np.pi * t / 2)
        golden_factor = self.golden_ratio * sine_factor

        self.population[beetle_index] += self.rolling_factor * golden_factor * (
            self.best_position - self.population[beetle_index]
        )

        # Keep the beetle inside the search space
        self.population[beetle_index] = np.clip(
            self.population[beetle_index],
            self.lower_bound,
            self.upper_bound
        )

    def communication_behavior(self, beetle_index: int, iteration: int) -> None:
        """
        Communication behavior with Lévy flight (information sharing)
        as described in the paper (equations 7-9): with some probability,
        move toward a randomly chosen elite beetle plus a Lévy step.

        Args:
            beetle_index: Index of the beetle in the population
            iteration: Current iteration number
        """
        if np.random.random() < self.communication_probability:
            elite_index = np.random.choice(self.elite_indices)

            # A beetle does not communicate with itself
            if elite_index == beetle_index:
                return

            # Progress ratio in [0, 1)
            t = iteration / self.max_iterations

            levy_step = self._levy_flight()

            # Smaller Lévy steps in later iterations
            scale_factor = (1 - t) * 0.5

            # Attraction toward the elite plus the scaled Lévy perturbation
            self.population[beetle_index] += self.rolling_factor * np.random.random() * (
                self.population[elite_index] - self.population[beetle_index]
            ) + scale_factor * levy_step

            # Keep the beetle inside the search space
            self.population[beetle_index] = np.clip(
                self.population[beetle_index],
                self.lower_bound,
                self.upper_bound
            )

    def opposition_based_learning(self) -> None:
        """
        Elite opposition-based learning: with some probability, reflect the
        worst beetle through the center of the search space and keep the
        reflection if it is better. Enhances exploration at low cost.
        """
        if np.random.random() < self.opposition_probability:
            worst_index = np.argmax(self.fitness)

            # Opposite point: x' = lb + ub - x (stays within bounds when
            # x is within bounds)
            opposite_position = self.lower_bound + self.upper_bound - self.population[worst_index]

            opposite_fitness = self.objective_function(opposite_position)

            # Greedy replacement: only accept an improvement
            if opposite_fitness < self.fitness[worst_index]:
                self.population[worst_index] = opposite_position
                self.fitness[worst_index] = opposite_fitness

                if opposite_fitness < self.best_fitness:
                    self.best_position = opposite_position.copy()
                    self.best_fitness = opposite_fitness

    def update_elite_group(self) -> None:
        """
        Update the elite group based on current fitness values
        """
        self.elite_indices = np.argsort(self.fitness)[:self.elite_group_size]
        self.elite_positions = self.population[self.elite_indices].copy()
        self.elite_fitness = self.fitness[self.elite_indices].copy()

    def optimize(self) -> Tuple[np.ndarray, float, np.ndarray]:
        """
        Run the optimization process for `max_iterations` iterations.

        Returns:
            Tuple containing:
            - Best position found (np.ndarray of length `dimensions`)
            - Best fitness value
            - Convergence curve (np.ndarray of per-iteration best fitness;
              the original annotation said List[float], but an ndarray has
              always been returned)
        """
        for iteration in range(self.max_iterations):
            # Update rolling factor adaptively
            self._update_rolling_factor(iteration)

            for i in range(self.population_size):
                # Random flight (exploration)
                self.random_flight(i)

                # Rolling behavior with golden sine strategy (exploitation)
                self.rolling_behavior(i, iteration)

                # Communication behavior with Lévy flight
                self.communication_behavior(i, iteration)

                # Evaluate new position
                self.fitness[i] = self.objective_function(self.population[i])

                # Update best solution if needed
                if self.fitness[i] < self.best_fitness:
                    self.best_position = self.population[i].copy()
                    self.best_fitness = self.fitness[i]

            # Apply opposition-based learning
            self.opposition_based_learning()

            # Update elite group
            self.update_elite_group()

            # Store best fitness for convergence analysis
            self.convergence_curve[iteration] = self.best_fitness

            # Optional: Print progress
            if (iteration + 1) % 10 == 0 or iteration == 0:
                print(f"Iteration {iteration + 1}/{self.max_iterations}, Best fitness: {self.best_fitness}")

        return self.best_position, self.best_fitness, self.convergence_curve


# Example usage with test functions
def sphere_function(x):
    """Sphere benchmark: sum of squared coordinates (global minimum 0 at the origin)."""
    squared = np.square(x)
    return np.sum(squared)


def rosenbrock_function(x):
    """Rosenbrock benchmark (banana valley); global minimum 0 at the all-ones point."""
    leading, trailing = x[:-1], x[1:]
    valley_term = 100.0 * (trailing - leading**2) ** 2
    slope_term = (leading - 1) ** 2
    return np.sum(valley_term + slope_term)


def rastrigin_function(x):
    """Rastrigin benchmark: highly multimodal; global minimum 0 at the origin."""
    cosine_term = 10 * np.cos(2 * np.pi * x)
    return 10 * len(x) + np.sum(x**2 - cosine_term)


if __name__ == "__main__":
    # Benchmark the improved optimizer on standard test functions.
    test_functions = {
        "Sphere": sphere_function,
        "Rosenbrock": rosenbrock_function,
        "Rastrigin": rastrigin_function,
    }

    dimensions = 30

    # Run optimizer for each test function
    for name, func in test_functions.items():
        print(f"\nOptimizing {name} function:")

        # Rastrigin is conventionally evaluated on [-5.12, 5.12]; the other
        # benchmarks use a wider [-30, 30] box.
        optimizer = ImprovedDungBeetleOptimizer(
            objective_function=func,
            dimensions=dimensions,
            population_size=50,
            max_iterations=200,
            lower_bound=-5.12 if name == "Rastrigin" else -30,
            upper_bound=5.12 if name == "Rastrigin" else 30
        )

        best_position, best_fitness, convergence = optimizer.optimize()

        print(f"Best fitness: {best_fitness}")

        # Plot convergence curve (log scale, since fitness spans many orders
        # of magnitude) and save it to disk.
        plt.figure(figsize=(10, 6))
        plt.plot(convergence)
        plt.title(f"Convergence Curve - {name} Function (Improved DBO)")
        plt.xlabel("Iteration")
        plt.ylabel("Fitness Value")
        plt.grid(True)
        plt.yscale("log")
        plt.savefig(f"{name}_improved_convergence.png")
        # Bug fix: matplotlib keeps figures alive until explicitly closed, so
        # the original leaked one open figure per loop iteration.
        plt.close()