import numpy as np
import math
import matplotlib.pyplot as plt
from io import BytesIO
from PIL import Image
import gradio as gr
from mpl_toolkits.mplot3d import Axes3D


class Objective:
    def Evaluate(self, p):
        return (-5.0*np.exp(-0.5*((p[0]+2.2)**2/0.4 + (p[1]-4.3)**2/0.4))
                + -2.0*np.exp(-0.5*((p[0]-2.2)**2/0.4 + (p[1]+4.3)**2/0.4)))


# Create an instance of the Objective class
obj = Objective()

# Evaluate the fitness of a position
position = np.array([-2.2, 4.3])
fitness = obj.Evaluate(position)
print(f"The fitness of the position {position} is {fitness}")


class Bounds:
    def __init__(self, lower, upper, enforce="clip"):
        self.lower = np.array(lower)
        self.upper = np.array(upper)
        self.enforce = enforce.lower()

    def Upper(self):
        return self.upper

    def Lower(self):
        return self.lower

    def Limits(self, pos):
        npart, ndim = pos.shape
        for i in range(npart):
            for j in range(ndim):
                if pos[i, j] < self.lower[j]:
                    if self.enforce == "clip":
                        pos[i, j] = self.lower[j]
                    elif self.enforce == "resample":
                        pos[i, j] = self.lower[j] + np.random.random() * (self.upper[j] - self.lower[j])
                elif pos[i, j] > self.upper[j]:
                    if self.enforce == "clip":
                        pos[i, j] = self.upper[j]
                    elif self.enforce == "resample":
                        pos[i, j] = self.lower[j] + np.random.random() * (self.upper[j] - self.lower[j])
            pos[i] = self.Validate(pos[i])
        return pos

    def Validate(self, pos):
        return pos


# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]

# Create an instance of the Bounds class
bounds = Bounds(lower_bounds, upper_bounds, enforce="clip")

# Define a set of positions
positions = np.array([[15, 15], [-15, -15], [5, 15], [15, 5]])

# Enforce the bounds on the positions
valid_positions = bounds.Limits(positions)
print(f"Valid positions: {valid_positions}")

# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]

# Create an instance of the Bounds class
bounds = Bounds(lower_bounds, upper_bounds, enforce="resample")

# Define a set of positions
positions = np.array([[15, 15], [-15, -15], [5, 15], [15, 5]])

# Enforce the bounds on the positions
valid_positions = bounds.Limits(positions)
print(f"Valid positions: {valid_positions}")


class QuasiRandomInitializer:
    def __init__(self, npart=10, ndim=2, bounds=None, k=1, jitter=0.0):
        self.npart = npart
        self.ndim = ndim
        self.bounds = bounds
        self.k = k
        self.jitter = jitter
        self.primes = [
            2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
            67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
            139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
            211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
            281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
            367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
            443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
            523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
            613, 617, 619, 631, 641, 643, 647, 653, 659
        ]

    def Halton(self, i, b):
        f = 1.0
        r = 0
        while i > 0:
            f = f / b
            r = r + f * (i % b)
            i = math.floor(i / b)
        return r

    def InitializeSwarm(self):
        self.swarm = np.zeros((self.npart, self.ndim))
        lo = np.zeros(self.ndim)
        hi = np.ones(self.ndim)
        if self.bounds is not None:
            lo = self.bounds.Lower()
            hi = self.bounds.Upper()
        for i in range(self.npart):
            for j in range(self.ndim):
                h = self.Halton(i + self.k, self.primes[j % len(self.primes)])
                q = self.jitter * (np.random.random() - 0.5)
                self.swarm[i, j] = lo[j] + (hi[j] - lo[j]) * h + q
        if self.bounds is not None:
            self.swarm = self.bounds.Limits(self.swarm)
        return self.swarm
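
# Quick illustrative check (not required by the optimizer): the Halton sequence
# used above is a low-discrepancy sequence, so consecutive samples spread out
# over [0, 1) instead of clustering the way pseudo-random draws can. For base 2
# the first values should be 1/2, 1/4, 3/4, 1/8, ... This is a small
# sanity-check sketch, not part of the original pipeline.
_demo_init = QuasiRandomInitializer()
print("Halton base-2 samples:", [_demo_init.Halton(i, 2) for i in range(1, 7)])
print("Halton base-3 samples:", [_demo_init.Halton(i, 3) for i in range(1, 7)])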

# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
bounds = Bounds(lower_bounds, upper_bounds, enforce="clip")

# Create an instance of the QuasiRandomInitializer class
init = QuasiRandomInitializer(npart=50, ndim=2, bounds=bounds)

# Initialize the swarm
swarm_positions = init.InitializeSwarm()
print(f"Initial swarm positions: {swarm_positions}")

# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
bounds = Bounds(lower_bounds, upper_bounds, enforce="resample")

# Create an instance of the QuasiRandomInitializer class
init = QuasiRandomInitializer(npart=50, ndim=2, bounds=bounds)

# Initialize the swarm
swarm_positions = init.InitializeSwarm()
print(f"Initial swarm positions: {swarm_positions}")
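
# Minimal sketch (illustrative only, not used by the GWO class below) of the
# per-leader position update that GWO.Step() applies for the alpha, beta, and
# delta wolves: A = 2*a*r1 - a and C = 2*r2 with r1, r2 uniform in [0, 1),
# D = |C*leader - pos|, candidate X = leader - A*D. Step() averages the three
# candidates (one per leader) to get the new position. The helper name
# gwo_candidate is introduced here just for this sketch.
def gwo_candidate(leader, pos, a):
    r1 = np.random.random(len(pos))
    r2 = np.random.random(len(pos))
    A = 2 * a * r1 - a            # exploration/exploitation coefficient
    C = 2 * r2                    # random emphasis on the leader's position
    D = np.abs(C * leader - pos)  # distance to the (weighted) leader
    return leader - A * D

# Example: one candidate pulled toward a hypothetical alpha at (-2.0, 4.0)
print("Sample GWO candidate:", gwo_candidate(np.array([-2.0, 4.0]), np.array([0.0, 0.0]), a=2.0))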

class GWO:
    def __init__(self, obj, eta=2.0, npart=10, ndim=2, max_iter=200,
                 tol=None, init=None, done=None, bounds=None):
        self.obj = obj
        self.npart = npart
        self.ndim = ndim
        self.max_iter = max_iter
        self.init = init
        self.done = done
        self.bounds = bounds
        self.tol = tol
        self.eta = eta
        self.initialized = False

    def Initialize(self):
        """Set up the swarm"""
        self.initialized = True
        self.iterations = 0

        self.pos = self.init.InitializeSwarm()   # initial swarm positions
        self.vpos = np.zeros(self.npart)
        for i in range(self.npart):
            self.vpos[i] = self.obj.Evaluate(self.pos[i])

        # Initialize the list to store positions at each iteration
        self.all_positions = []
        self.all_positions.append(self.pos.copy())  # Store the initial positions

        # Swarm bests
        self.gidx = []
        self.gbest = []
        self.gpos = []
        self.giter = []
        idx = np.argmin(self.vpos)
        self.gidx.append(idx)
        self.gbest.append(self.vpos[idx])
        self.gpos.append(self.pos[idx].copy())
        self.giter.append(0)

        # 1st, 2nd, and 3rd best positions
        idx = np.argsort(self.vpos)
        self.alpha = self.pos[idx[0]].copy()
        self.valpha = self.vpos[idx[0]]
        self.beta = self.pos[idx[1]].copy()
        self.vbeta = self.vpos[idx[1]]
        self.delta = self.pos[idx[2]].copy()
        self.vdelta = self.vpos[idx[2]]

    # *** Gradio app method: optimize, used by the standalone optimize()
    #     function defined outside the GWO class ***
    def optimize(self):
        """
        Run a full optimization and return the best positions and fitness values.
        This method is designed to be used with Gradio.
        """
        # Initialize the swarm
        self.Initialize()

        # Lists to store the best positions and fitness values at each step
        best_positions = []
        best_fitness = []

        # Main loop
        while not self.Done():
            self.Step()  # Perform an optimization step
            # Update best_positions and best_fitness with the current best values
            # (gpos holds positions, gbest holds fitness values)
            best_positions.append(self.gpos[-1])
            best_fitness.append(self.gbest[-1])

        # Print the best positions and fitness found
        print("Best Positions:", best_positions)
        print("Best Fitness:", best_fitness)

        # Return the best positions and fitness after the optimization
        return best_positions, best_fitness

    def Step(self):
        """Do one swarm step"""
        # a decreases linearly from eta to zero (default eta is 2)
        a = self.eta - self.eta * (self.iterations / self.max_iter)

        # Update everyone
        for i in range(self.npart):
            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Dalpha = np.abs(C * self.alpha - self.pos[i])
            X1 = self.alpha - A * Dalpha

            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Dbeta = np.abs(C * self.beta - self.pos[i])
            X2 = self.beta - A * Dbeta

            A = 2 * a * np.random.random(self.ndim) - a
            C = 2 * np.random.random(self.ndim)
            Ddelta = np.abs(C * self.delta - self.pos[i])
            X3 = self.delta - A * Ddelta

            self.pos[i, :] = (X1 + X2 + X3) / 3.0

        # Keep in bounds
        if self.bounds is not None:
            self.pos = self.bounds.Limits(self.pos)

        # Get objective function values and check for new leaders
        for i in range(self.npart):
            self.vpos[i] = self.obj.Evaluate(self.pos[i])

            # new alpha?
            if self.vpos[i] < self.valpha:
                self.vdelta = self.vbeta
                self.delta = self.beta.copy()
                self.vbeta = self.valpha
                self.beta = self.alpha.copy()
                self.valpha = self.vpos[i]
                self.alpha = self.pos[i].copy()

            # new beta?
            if (self.vpos[i] > self.valpha) and (self.vpos[i] < self.vbeta):
                self.vdelta = self.vbeta
                self.delta = self.beta.copy()
                self.vbeta = self.vpos[i]
                self.beta = self.pos[i].copy()

            # new delta? (compare against beta, not alpha, so values that fall
            # between beta and delta can still update delta)
            if (self.vpos[i] > self.vbeta) and (self.vpos[i] < self.vdelta):
                self.vdelta = self.vpos[i]
                self.delta = self.pos[i].copy()

            # is alpha new swarm best?
            if self.valpha < self.gbest[-1]:
                self.gidx.append(i)
                self.gbest.append(self.valpha)
                np.save('best_fitness.npy', np.array(self.gbest))
                self.gpos.append(self.alpha.copy())
                np.save('best_positions.npy', np.array(self.gpos))
                # Save the positions at the current iteration
                self.all_positions.append(self.pos.copy())
                np.save('all_positions.npy', np.array(self.all_positions), allow_pickle=True)
                self.giter.append(self.iterations)

        self.iterations += 1

    def Done(self):
        """Check if we are done"""
        if self.done is None:
            if self.tol is None:
                return self.iterations == self.max_iter
            else:
                return (self.gbest[-1] < self.tol) or (self.iterations == self.max_iter)
        else:
            return self.done.Done(self.gbest,
                                  gpos=self.gpos,
                                  pos=self.pos,
                                  max_iter=self.max_iter,
                                  iteration=self.iterations)

    def Evaluate(self, pos):
        p = np.zeros(self.npart)
        for i in range(self.npart):
            p[i] = self.obj.Evaluate(pos[i])
        return p

    def Results(self):
        if not self.initialized:
            return None
        return {
            "npart": self.npart,
            "ndim": self.ndim,
            "max_iter": self.max_iter,
            "iterations": self.iterations,
            "tol": self.tol,
            "eta": self.eta,
            "gbest": self.gbest,
            "giter": self.giter,
            "gpos": self.gpos,
            "gidx": self.gidx,
            "pos": self.pos,
            "vpos": self.vpos,
        }
    # Earlier draft: plots the first few iterations from Results()['pos'].
    # def plot_positions(self):
    #     """Plot the positions of the particles over the iterations"""
    #     # Get the pos data from the Results method
    #     results = self.Results()
    #     if results is None:
    #         print("The GWO algorithm has not been initialized or run yet.")
    #         return
    #     pos = results['pos']
    #     if not any(pos):  # Check if pos is empty
    #         print("No positions to plot.")
    #         return
    #     # Create a 2x2 grid of subplots
    #     fig, axs = plt.subplots(2, 2, figsize=(12, 12))
    #     axs = axs.flatten()
    #     # Determine the number of iterations to plot
    #     num_iterations = min(len(pos), len(axs))
    #     # Iterate over the subplots and the iterations to plot
    #     for i in range(num_iterations):
    #         # Plot the particles' positions at the current iteration
    #         positions = pos[i]
    #         axs[i].scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #         # Plot the goal position
    #         axs[i].scatter([-2.2], [4.3], c='g', marker='s', s=100, label='Goal')
    #         # Set plot title and labels
    #         axs[i].set_title(f'Iteration {i}')
    #         axs[i].set_xlabel('X')
    #         axs[i].set_ylabel('Y')
    #         axs[i].legend()
    #     # Hide any unused subplots
    #     for i in range(num_iterations, len(axs)):
    #         axs[i].axis('off')
    #     plt.tight_layout()
    #     plt.show()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image

    # Updated plot_positions method with a check ensuring that the
    # `self.all_positions` list has enough elements before trying to access
    # them. The new logic added to the method plot_positions:
    # *********** New logic with validation *********************
    #     # Check if the self.all_positions list has enough elements
    #     if len(self.all_positions) <= max(iterations_to_plot):
    #         print("The self.all_positions list does not have enough elements to plot the specified iterations.")
    #         return

    # def plot_positions(self):
    #     """Plot the positions of the particles over the iterations"""
    #     # Define the iterations you want to plot
    #     iterations_to_plot = [0, 25, 45, 65]
    #     # Check if the self.all_positions list has enough elements
    #     if len(self.all_positions) <= max(iterations_to_plot):
    #         print("The self.all_positions list does not have enough elements to plot the specified iterations.")
    #         return
    #     # Create a 2x2 grid of subplots
    #     fig, axs = plt.subplots(2, 2, figsize=(12, 12))
    #     axs = axs.flatten()
    #     # Iterate over the subplots and the iterations to plot
    #     for i, iteration in enumerate(iterations_to_plot):
    #         # Plot the particles' positions at the current iteration
    #         positions = self.all_positions[iteration]
    #         axs[i].scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #         # Plot the goal position
    #         axs[i].scatter([-2.2], [4.3], c='g', marker='s', s=100, label='Goal')
    #         # Set plot title and labels
    #         axs[i].set_title(f'Iteration {iteration}')
    #         axs[i].set_xlabel('X')
    #         axs[i].set_ylabel('Y')
    #         axs[i].legend()
    #     plt.tight_layout()
    #     plt.show()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image

    # Shows the positions of a set of particles (wolves) at SPECIFIC iterations.
    # Here the data visualization captures the wolves' positions at iterations
    # 0, 50, 150, and 199, producing a 2 x 2 grid that validates the
    # optimization for the GWO algorithm.
    # def plot_positions(self):
    #     """Plot the positions of the particles over the iterations"""
    #     # Define the iterations you want to plot
    #     iterations_to_plot = [0, 50, 150, 199]
    #     # Create a 2x2 grid of subplots
    #     fig, axs = plt.subplots(2, 2, figsize=(10, 10))
    #     axs = axs.flatten()
    #     # Iterate over the subplots and the iterations to plot
    #     for i, iteration in enumerate(iterations_to_plot):
    #         # Plot the particles' positions at the current iteration
    #         positions = self.all_positions[iteration]
    #         axs[i].scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #         # Plot the goal position
    #         axs[i].scatter([-2.2], [4.3], c='g', marker='s', s=100, label='Goal')
    #         # Set plot title and labels
    #         axs[i].set_title(f'Iteration {iteration}')
    #         axs[i].set_xlabel('X')
    #         axs[i].set_ylabel('Y')
    #         axs[i].legend()
    #     plt.tight_layout()
    #     plt.show()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image

    # Plots the positions over ALL iterations 0-199 (one plot per iteration,
    # not a 2 x 2 subplot capture).
    # def plot_positions(self):
    #     """Plot the positions of the particles over the iterations"""
    #     fig, axs = plt.subplots(len(self.all_positions), 1, figsize=(10, 5*len(self.all_positions)))
    #     for i, positions in enumerate(self.all_positions):
    #         # Plot the particles' positions at the current iteration
    #         axs[i].scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #         # Plot the goal position
    #         axs[i].scatter([-2.2], [4.3], c='g', marker='s', s=100, label='Goal')
    #         # Set plot title and labels
    #         axs[i].set_title(f'Iteration {i}')
    #         axs[i].set_xlabel('X')
    #         axs[i].set_ylabel('Y')
    #         axs[i].legend()
    #     plt.tight_layout()
    #     plt.show()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image

    # Plots only iterations 0, 1, 2, 3 (subplots of the first four iterations only).
    # def plot_positions(self):
    #     """Plot the positions of the particles over the iterations"""
    #     fig, axs = plt.subplots(2, 2, figsize=(10, 10))
    #     axs = axs.flatten()
    #     for i, ax in enumerate(axs):
    #         # Plot the particles' positions at the current iteration
    #         positions = self.all_positions[i]
    #         ax.scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #         # Plot the goal position
    #         ax.scatter([-2.2], [4.3], c='g', marker='s', s=100, label='Goal')
    #         # Set plot title and labels
    #         ax.set_title(f'Iteration {i}')
    #         ax.set_xlabel('X')
    #         ax.set_ylabel('Y')
    #         ax.legend()
    #     plt.tight_layout()
    #     plt.show()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image
    def plot_positions(self, iterations=[0, 3, 7, 11]):
        """Plot the positions of the particles over the specified iterations"""
        # Load the all_positions data from the .npy file
        all_positions = np.load('all_positions.npy', allow_pickle=True)

        # Create a figure with the correct number of subplots
        num_iterations = len(iterations)
        fig, axs = plt.subplots(num_iterations, figsize=(6, 4 * num_iterations))

        # If there is only one subplot, make it an array to simplify the loop
        if num_iterations == 1:
            axs = [axs]

        # Iterate over the subplots and the specified iterations to plot
        for i, ax in enumerate(axs):
            # Plot the particles' positions at the specified iteration
            iteration = iterations[i]
            if iteration < len(all_positions):
                positions = all_positions[iteration]
                ax.scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
                # Set plot title and labels
                ax.set_title(f'Iteration {iteration}')
                ax.set_xlabel('X')
                ax.set_ylabel('Y')
            else:
                ax.axis('off')  # Hide the subplot if the iteration is out of range

        plt.tight_layout()

        # Save the plot to a BytesIO object
        buf = BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        image = Image.open(buf)
        plt.close()
        return image

    # Debug plot_positions method (3 to 4 subplots appear if iterations=[0, 3, 7, 11],
    # because all_positions only grows when a new swarm best is found in Step)
    # def plot_positions(self, iterations=[0, 5, 10, 15]):
    #     # Load the all_positions data from the .npy file
    #     all_positions = np.load('all_positions.npy', allow_pickle=True)
    #     # Debugging: print the shape of all_positions
    #     print(f"all_positions shape: {all_positions.shape}")
    #     # Create a figure with the correct number of subplots
    #     num_iterations = len(iterations)
    #     fig, axs = plt.subplots(num_iterations, figsize=(6, 4 * num_iterations))
    #     # If there is only one subplot, make it an array to simplify the loop
    #     if num_iterations == 1:
    #         axs = [axs]
    #     # Iterate over the subplots and the specified iterations to plot
    #     for i, ax in enumerate(axs):
    #         # Plot the particles' positions at the specified iteration
    #         iteration = iterations[i]
    #         if iteration < len(all_positions):
    #             positions = all_positions[iteration]
    #             ax.scatter(positions[:, 0], positions[:, 1], c='b', alpha=0.5)
    #             # Set plot title and labels
    #             ax.set_title(f'Iteration {iteration}')
    #             ax.set_xlabel('X')
    #             ax.set_ylabel('Y')
    #         else:
    #             ax.axis('off')  # Hide the subplot if the iteration is out of range
    #     plt.tight_layout()
    #     # Save the plot to a BytesIO object
    #     buf = BytesIO()
    #     plt.savefig(buf, format='png')
    #     buf.seek(0)
    #     image = Image.open(buf)
    #     plt.close()
    #     return image
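
# Minimal sketch of driving the GWO class directly, without the Gradio app
# below. It reuses the obj, bounds, and init objects defined above. The
# objective's global minimum sits at roughly (-2.2, 4.3) with a fitness near
# -5.0, so gbest[-1] should approach that value; this is an illustrative usage
# example (the names gwo_demo and demo_results are introduced just for it),
# not part of the original app flow.
gwo_demo = GWO(obj=obj, npart=50, ndim=2, max_iter=200, init=init, bounds=bounds)
gwo_demo.optimize()
demo_results = gwo_demo.Results()
print("Standalone GWO demo, best fitness found:", demo_results['gbest'][-1])
print("Standalone GWO demo, best position found:", demo_results['gpos'][-1])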

def optimize(npart, ndim, max_iter):
    # Build an initializer that matches the requested swarm size so the
    # "Number of Wolves" slider is actually honored
    init = QuasiRandomInitializer(npart=npart, ndim=ndim, bounds=bounds)

    # Initialize the GWO algorithm with the provided parameters
    gwo = GWO(obj=obj, npart=npart, ndim=ndim, max_iter=max_iter, init=init, bounds=bounds)

    # Run the optimization
    best_positions, best_fitness = gwo.optimize()

    # Get the best fitness and positions at the last iteration
    last_best_fitness = best_fitness[-1]
    last_best_positions = best_positions[-1]

    # Format the output strings
    best_fitness_text = f"Best Fitness: {last_best_fitness}"
    best_positions_text = f"Best Positions: {last_best_positions}"

    # Get the positions plot as a PIL Image
    positions_plot = gwo.plot_positions(iterations=[0, 3, 7, 11])

    # Load the best fitness and positions from .npy files
    best_fitness_npy = np.load('best_fitness.npy')
    best_positions_npy = np.load('best_positions.npy')

    # Get the results from the GWO algorithm
    results = gwo.Results()
    gidx_text = f"Grey Wolf Index: {results['gidx']}" if results else "No results available."

    # Get the result from the GWO algorithm
    # results = gwo.Results()
    # vpos_text = f"Grey Wolf Index: {results['vpos']}" if results else "No results available."

    # Get the results from the GWO algorithm
    results = gwo.Results()
    pos_text = f"Grey Wolf Positions: {results['pos']}" if results else "No results available."

    # Return the positions plot, the text for the best positions and fitness,
    # and the loaded data (the order matches the Gradio outputs below)
    return positions_plot, best_positions_text, best_fitness_text, best_fitness_npy, best_positions_npy, gidx_text, pos_text


# Define the Gradio interface
iface = gr.Interface(
    fn=optimize,  # Pass the optimize function object
    inputs=[
        gr.components.Slider(10, 50, 50, step=1, label="Number of Wolves [Default = 50 Wolves]"),
        gr.components.Slider(2, 2, 2, step=1, label="Two-Dimensional Search Space"),
        gr.components.Slider(100, 200, 200, step=1, label="Maximum Iterations [Default = 200 Epochs]"),
    ],
    outputs=[
        gr.components.Image(type="pil", label="Best Positions Plot"),
        gr.components.Textbox(label="Best Positions"),
        gr.components.Textbox(label="Best Fitness"),
        gr.components.Textbox(label="Loaded Best Fitness"),
        gr.components.Textbox(label="Loaded Best Positions"),
        gr.components.Textbox(label="Delta, Beta, & Alpha wolves jostling for hierarchy (all other wolves are Omega)."),  # New textbox for the results
        gr.components.Textbox(label="Grey Wolves Positions Capture")
    ],
    title="Grey Wolf Optimizer",
    description=r"""
## Grey Wolf Optimizer
The Grey Wolf Optimizer (GWO) is a population-based metaheuristic optimization algorithm inspired by the social behavior of grey wolves in nature.
The objective function to be optimized is given by the following formula:
```math
f(p) = -5.0 \cdot \exp \left( -0.5 \cdot \left( \frac{(x+2.2)^2}{0.4} + \frac{(y-4.3)^2}{0.4} \right) \right) + -2.0 \cdot \exp \left( -0.5 \cdot \left( \frac{(x-2.2)^2}{0.4} + \frac{(y+4.3)^2}{0.4} \right) \right)
```
Or in a more readable form:
$$
f(p) = -5.0 \cdot \exp \left( -0.5 \cdot \left( \frac{(x+2.2)^2}{0.4} + \frac{(y-4.3)^2}{0.4} \right) \right) + -2.0 \cdot \exp \left( -0.5 \cdot \left( \frac{(x-2.2)^2}{0.4} + \frac{(y+4.3)^2}{0.4} \right) \right)
$$
""",
    article="## Grey Wolf Optimizer\nThe Grey Wolf Optimizer (GWO) is a population-based metaheuristic optimization algorithm inspired by the social behavior of grey wolves in nature."
)

# Launch the Gradio interface
iface.launch()
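
# Note: with the default sliders (50 wolves, 200 iterations), the reported best
# position should land close to (-2.2, 4.3) with a best fitness near -5.0, the
# deeper of the two Gaussian wells in the objective. If a publicly shareable
# link is needed (e.g., in a hosted notebook), Gradio's launch() also accepts
# share=True: iface.launch(share=True).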