"""
Comparison of original DBO and improved DBO algorithms using MAE and RMSE metrics
"""

import numpy as np
import matplotlib.pyplot as plt
import time
from dbo import DungBeetleOptimizer
from improved_dbo import ImprovedDungBeetleOptimizer
from sklearn.metrics import mean_squared_error, mean_absolute_error


def _single_run(optimizer_cls, function, dimensions, population_size,
                max_iterations, lower_bound, upper_bound, ground_truth):
    """
    Execute one optimization run with the given optimizer class.

    Args:
        optimizer_cls: Optimizer class (DungBeetleOptimizer or ImprovedDungBeetleOptimizer)
        function: The objective function to minimize
        dimensions: Number of dimensions
        population_size: Population size passed to the optimizer
        max_iterations: Iteration budget passed to the optimizer
        lower_bound: Lower search bound
        upper_bound: Upper search bound
        ground_truth: Known global optimum position (1-D array of length `dimensions`)

    Returns:
        Tuple (best_position, best_fitness, convergence_curve, mae, rmse, elapsed_seconds)
    """
    start_time = time.time()
    optimizer = optimizer_cls(
        objective_function=function,
        dimensions=dimensions,
        population_size=population_size,
        max_iterations=max_iterations,
        lower_bound=lower_bound,
        upper_bound=upper_bound
    )
    best_position, best_fitness, convergence = optimizer.optimize()
    elapsed = time.time() - start_time

    # MAE/RMSE between the found solution and the known global optimum.
    # Wrapping both vectors as single-sample 2-D inputs makes sklearn average
    # over all coordinates, which equals the per-coordinate mean error.
    mae = mean_absolute_error([ground_truth], [best_position])
    rmse = np.sqrt(mean_squared_error([ground_truth], [best_position]))
    return best_position, best_fitness, convergence, mae, rmse, elapsed


def _percent_improvement(baseline_values, improved_values):
    """
    Percentage by which the mean of `improved_values` undercuts the mean of
    `baseline_values`. Returns 0.0 when the baseline mean is zero, which
    avoids a division by zero when the baseline already hit the exact optimum.
    """
    baseline_mean = np.mean(baseline_values)
    if baseline_mean == 0:
        return 0.0
    return (baseline_mean - np.mean(improved_values)) / baseline_mean * 100


def run_comparison(function_name, function, dimensions=30, runs=5):
    """
    Compare original DBO and improved DBO on a test function using MAE and RMSE metrics.

    Runs both optimizers `runs` times each, prints per-run results and
    aggregate statistics, and saves convergence-curve, bar-chart and boxplot
    PNGs (named after `function_name`) in the working directory.

    Args:
        function_name: Name of the function (selects bounds, labels plots and files)
        function: The objective function
        dimensions: Number of dimensions
        runs: Number of independent runs
    """
    print(f"\n{'=' * 60}")
    print(f"Comparing algorithms on {function_name} function ({dimensions} dimensions)")
    print(f"{'=' * 60}")

    # Set bounds based on function
    if function_name == "Rastrigin":
        lower_bound, upper_bound = -5.12, 5.12
    else:
        lower_bound, upper_bound = -30, 30

    # Results storage
    dbo_best_fitnesses = []
    dbo_times = []
    dbo_maes = []
    dbo_rmses = []

    idbo_best_fitnesses = []
    idbo_times = []
    idbo_maes = []
    idbo_rmses = []

    # Parameters
    population_size = 50
    max_iterations = 100

    # Ground truth solution: the global optimum is at the origin for all of
    # these test functions except Rosenbrock, whose optimum is [1, 1, ..., 1].
    if function_name == "Rosenbrock":
        ground_truth = np.ones(dimensions)
    else:
        ground_truth = np.zeros(dimensions)

    # Run multiple times to get statistical results
    for run in range(runs):
        print(f"\nRun {run + 1}/{runs}:")

        # Original DBO
        (_, dbo_best_fitness, dbo_convergence,
         dbo_mae, dbo_rmse, dbo_time) = _single_run(
            DungBeetleOptimizer, function, dimensions, population_size,
            max_iterations, lower_bound, upper_bound, ground_truth)

        dbo_best_fitnesses.append(dbo_best_fitness)
        dbo_times.append(dbo_time)
        dbo_maes.append(dbo_mae)
        dbo_rmses.append(dbo_rmse)

        print(f"Original DBO - Best fitness: {dbo_best_fitness:.6f}, MAE: {dbo_mae:.6f}, RMSE: {dbo_rmse:.6f}, Time: {dbo_time:.2f}s")

        # Improved DBO
        (_, idbo_best_fitness, idbo_convergence,
         idbo_mae, idbo_rmse, idbo_time) = _single_run(
            ImprovedDungBeetleOptimizer, function, dimensions, population_size,
            max_iterations, lower_bound, upper_bound, ground_truth)

        idbo_best_fitnesses.append(idbo_best_fitness)
        idbo_times.append(idbo_time)
        idbo_maes.append(idbo_mae)
        idbo_rmses.append(idbo_rmse)

        print(f"Improved DBO - Best fitness: {idbo_best_fitness:.6f}, MAE: {idbo_mae:.6f}, RMSE: {idbo_rmse:.6f}, Time: {idbo_time:.2f}s")

        # Plot convergence curves for the first run
        if run == 0:
            plt.figure(figsize=(12, 6))
            plt.plot(dbo_convergence, label='Original DBO')
            plt.plot(idbo_convergence, label='Improved DBO')
            plt.title(f"Convergence Comparison - {function_name} Function")
            plt.xlabel("Iteration")
            plt.ylabel("Fitness Value")
            plt.grid(True)
            plt.yscale("log")
            plt.legend()
            plt.savefig(f"{function_name}_comparison.png")
            # Close the figure after saving; otherwise figures accumulate
            # across benchmark functions and leak memory.
            plt.close()

            # Plot error metrics comparison
            plt.figure(figsize=(10, 6))
            bar_width = 0.35
            index = np.arange(2)

            plt.bar(index, [dbo_mae, idbo_mae], bar_width, label='MAE')
            plt.bar(index + bar_width, [dbo_rmse, idbo_rmse], bar_width, label='RMSE')

            plt.xlabel('Algorithm')
            plt.ylabel('Error')
            plt.title(f'MAE and RMSE Comparison - {function_name} Function')
            plt.xticks(index + bar_width / 2, ('Original DBO', 'Improved DBO'))
            plt.legend()
            plt.grid(True, axis='y')
            plt.savefig(f"{function_name}_error_metrics.png")
            plt.close()

    # Convert to numpy arrays for easier statistics
    dbo_best_fitnesses = np.array(dbo_best_fitnesses)
    idbo_best_fitnesses = np.array(idbo_best_fitnesses)
    dbo_times = np.array(dbo_times)
    idbo_times = np.array(idbo_times)
    dbo_maes = np.array(dbo_maes)
    idbo_maes = np.array(idbo_maes)
    dbo_rmses = np.array(dbo_rmses)
    idbo_rmses = np.array(idbo_rmses)

    # Report statistics
    print("\nStatistics:")
    print("\nOriginal DBO:")
    print(f"Best fitness: {np.min(dbo_best_fitnesses):.6f}")
    print(f"Mean fitness: {np.mean(dbo_best_fitnesses):.6f}")
    print(f"Mean MAE: {np.mean(dbo_maes):.6f}")
    print(f"Mean RMSE: {np.mean(dbo_rmses):.6f}")
    print(f"Std dev (MAE): {np.std(dbo_maes):.6f}")
    print(f"Std dev (RMSE): {np.std(dbo_rmses):.6f}")
    print(f"Mean time: {np.mean(dbo_times):.2f}s")

    print("\nImproved DBO:")
    print(f"Best fitness: {np.min(idbo_best_fitnesses):.6f}")
    print(f"Mean fitness: {np.mean(idbo_best_fitnesses):.6f}")
    print(f"Mean MAE: {np.mean(idbo_maes):.6f}")
    print(f"Mean RMSE: {np.mean(idbo_rmses):.6f}")
    print(f"Std dev (MAE): {np.std(idbo_maes):.6f}")
    print(f"Std dev (RMSE): {np.std(idbo_rmses):.6f}")
    print(f"Mean time: {np.mean(idbo_times):.2f}s")

    # Calculate improvement percentages (zero-safe, see _percent_improvement)
    mae_improvement = _percent_improvement(dbo_maes, idbo_maes)
    rmse_improvement = _percent_improvement(dbo_rmses, idbo_rmses)
    fitness_improvement = _percent_improvement(dbo_best_fitnesses, idbo_best_fitnesses)

    print(f"\nImprovement percentages:")
    print(f"MAE improvement: {mae_improvement:.2f}%")
    print(f"RMSE improvement: {rmse_improvement:.2f}%")
    print(f"Fitness improvement: {fitness_improvement:.2f}%")

    # Create summary plot for all runs
    plt.figure(figsize=(14, 8))

    # Plot MAE comparison
    plt.subplot(1, 2, 1)
    plt.boxplot([dbo_maes, idbo_maes], tick_labels=['Original DBO', 'Improved DBO'])
    plt.title(f'MAE Comparison - {function_name} Function')
    plt.ylabel('Mean Absolute Error')
    plt.grid(True, axis='y')

    # Plot RMSE comparison
    plt.subplot(1, 2, 2)
    plt.boxplot([dbo_rmses, idbo_rmses], tick_labels=['Original DBO', 'Improved DBO'])
    plt.title(f'RMSE Comparison - {function_name} Function')
    plt.ylabel('Root Mean Squared Error')
    plt.grid(True, axis='y')

    plt.tight_layout()
    plt.savefig(f"{function_name}_error_boxplot.png")
    plt.close()


def sphere_function(x):
    """Sphere benchmark: sum of squared coordinates (global minimum 0 at the origin)."""
    return np.sum(np.square(x))


def rosenbrock_function(x):
    """Rosenbrock benchmark (global minimum 0 at [1, 1, ..., 1])."""
    head = x[:-1]
    tail = x[1:]
    return np.sum(100.0 * (tail - head ** 2) ** 2 + (head - 1) ** 2)


def rastrigin_function(x):
    """Rastrigin benchmark (global minimum 0 at the origin)."""
    dim = len(x)
    cosine_term = np.cos(2 * np.pi * x)
    return 10 * dim + np.sum(x ** 2 - 10 * cosine_term)


def griewank_function(x):
    """Griewank benchmark (global minimum 0 at the origin)."""
    indices = np.arange(1, len(x) + 1)
    quadratic = np.sum(x ** 2) / 4000
    oscillatory = np.prod(np.cos(x / np.sqrt(indices)))
    return 1 + quadratic - oscillatory


def ackley_function(x):
    """Ackley benchmark (global minimum 0 at the origin)."""
    dim = len(x)
    sphere_exp = np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / dim))
    cosine_exp = np.exp(np.sum(np.cos(2 * np.pi * x)) / dim)
    return -20 * sphere_exp - cosine_exp + 20 + np.exp(1)


if __name__ == "__main__":
    # Define test functions
    test_functions = {
        "Sphere": sphere_function,
        "Rosenbrock": rosenbrock_function,
        "Rastrigin": rastrigin_function,
        "Griewank": griewank_function,
        "Ackley": ackley_function
    }
    
    # Test dimensions
    dimensions = 30
    
    # Number of independent runs for statistical analysis
    runs = 5
    
    # Run comparisons for each function
    for name, func in test_functions.items():
        run_comparison(name, func, dimensions, runs)