import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from io import BytesIO
from PIL import Image
import gradio as gr
from mpl_toolkits.mplot3d import Axes3D
class Objective:
def Evaluate(self, p):
return -5.0*np.exp(-0.5*((p[0]+2.2)**2/0.4 + (p[1]-4.3)**2/0.4)) - 2.0*np.exp(-0.5*((p[0]-2.2)**2/0.4 + (p[1]+4.3)**2/0.4))
# Create an instance of the Objective class
obj = Objective()
# Evaluate the fitness of a position
position = np.array([-2.2, 4.3])
fitness = obj.Evaluate(position)
print(f"The fitness of the position {position} is {fitness}")
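# Illustrative sanity check: the objective also has a second, shallower well centered
# near (2.2, -4.3) with depth about -2.0, so the well near (-2.2, 4.3) evaluated above
# is the global minimum the wolves should converge to.
second_well = obj.Evaluate(np.array([2.2, -4.3]))
print(f"The fitness at the shallower well (2.2, -4.3) is {second_well}")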
class Bounds:
def __init__(self, lower, upper, enforce="clip"):
self.lower = np.array(lower)
self.upper = np.array(upper)
self.enforce = enforce.lower()
def Upper(self):
return self.upper
def Lower(self):
return self.lower
def Limits(self, pos):
npart, ndim = pos.shape
for i in range(npart):
for j in range(ndim):
if pos[i, j] < self.lower[j]:
if self.enforce == "clip":
pos[i, j] = self.lower[j]
elif self.enforce == "resample":
pos[i, j] = self.lower[j] + np.random.random() * (self.upper[j] - self.lower[j])
elif pos[i, j] > self.upper[j]:
if self.enforce == "clip":
pos[i, j] = self.upper[j]
elif self.enforce == "resample":
pos[i, j] = self.lower[j] + np.random.random() * (self.upper[j] - self.lower[j])
pos[i] = self.Validate(pos[i])
return pos
def Validate(self, pos):
return pos
# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
# Create an instance of the Bounds class
bounds = Bounds(lower_bounds, upper_bounds, enforce="clip")
# Define a set of positions
positions = np.array([[15, 15], [-15, -15], [5, 15], [15, 5]])
# Enforce the bounds on the positions
valid_positions = bounds.Limits(positions)
print(f"Valid positions: {valid_positions}")
# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
# Create an instance of the Bounds class
bounds = Bounds(lower_bounds, upper_bounds, enforce="resample")
# Define a set of positions
positions = np.array([[15, 15], [-15, -15], [5, 15], [15, 5]], dtype=float)  # float dtype so resampled values are not truncated to integers
# Enforce the bounds on the positions
valid_positions = bounds.Limits(positions)
print(f"Valid positions: {valid_positions}")
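# Minimal verification sketch: after Limits(), every coordinate should lie inside
# [lower, upper] regardless of whether "clip" or "resample" enforcement was used.
assert np.all(valid_positions >= bounds.Lower()) and np.all(valid_positions <= bounds.Upper())
print("All enforced positions lie within the bounds.")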
class QuasiRandomInitializer:
def __init__(self, npart=10, ndim=2, bounds=None, k=1, jitter=0.0):
self.npart = npart
self.ndim = ndim
self.bounds = bounds
self.k = k
self.jitter = jitter
self.primes = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659
]
def Halton(self, i, b):
f = 1.0
r = 0
while i > 0:
f = f / b
r = r + f * (i % b)
i = math.floor(i / b)
return r
def InitializeSwarm(self):
self.swarm = np.zeros((self.npart, self.ndim))
lo = np.zeros(self.ndim)
hi = np.ones(self.ndim)
if self.bounds is not None:
lo = self.bounds.Lower()
hi = self.bounds.Upper()
for i in range(self.npart):
for j in range(self.ndim):
h = self.Halton(i + self.k, self.primes[j % len(self.primes)])
q = self.jitter * (np.random.random() - 0.5)
self.swarm[i, j] = lo[j] + (hi[j] - lo[j]) * h + q
if self.bounds is not None:
self.swarm = self.bounds.Limits(self.swarm)
return self.swarm
# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
bounds = Bounds(lower_bounds, upper_bounds, enforce="clip")
# Create an instance of the QuasiRandomInitializer class
init = QuasiRandomInitializer(npart=50, ndim=2, bounds=bounds)
# Initialize the swarm
swarm_positions = init.InitializeSwarm()
print(f"Initial swarm positions: {swarm_positions}")
# Define the bounds
lower_bounds = [-6, -6]
upper_bounds = [6, 6]
bounds = Bounds(lower_bounds, upper_bounds, enforce="resample")
# Create an instance of the QuasiRandomInitializer class
init = QuasiRandomInitializer(npart=50, ndim=2, bounds=bounds)
# Initialize the swarm
swarm_positions = init.InitializeSwarm()
print(f"Initial swarm positions: {swarm_positions}")
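# Illustrative check of the low-discrepancy sequence: the first base-2 Halton values
# (i = 1..4) are 0.5, 0.25, 0.75, 0.125; InitializeSwarm scales such values into
# [lower, upper], using a different prime base for each dimension.
print([init.Halton(i, 2) for i in range(1, 5)])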
class GWO:
def __init__(self, obj, eta=2.0, npart=10, ndim=2, max_iter=200, tol=None, init=None, done=None, bounds=None):
self.obj = obj
self.npart = npart
self.ndim = ndim
self.max_iter = max_iter
self.init = init
self.done = done
self.bounds = bounds
self.tol = tol
self.eta = eta
self.initialized = False
def Initialize(self):
"""Set up the swarm"""
self.initialized = True
self.iterations = 0
self.pos = self.init.InitializeSwarm() # initial swarm positions
self.vpos= np.zeros(self.npart)
for i in range(self.npart):
self.vpos[i] = self.obj.Evaluate(self.pos[i])
# Initialize the list to store positions at each iteration
self.all_positions = []
self.all_positions.append(self.pos.copy()) # Store the initial positions
# Swarm bests
self.gidx = []
self.gbest = []
self.gpos = []
self.giter = []
idx = np.argmin(self.vpos)
self.gidx.append(idx)
self.gbest.append(self.vpos[idx])
self.gpos.append(self.pos[idx].copy())
self.giter.append(0)
# 1st, 2nd, and 3rd best positions
idx = np.argsort(self.vpos)
self.alpha = self.pos[idx[0]].copy()
self.valpha= self.vpos[idx[0]]
self.beta = self.pos[idx[1]].copy()
self.vbeta = self.vpos[idx[1]]
self.delta = self.pos[idx[2]].copy()
self.vdelta= self.vpos[idx[2]]
# *** optimize() method: runs a full optimization so the GWO class can be driven directly by the module-level Gradio wrapper function defined further below ***
def optimize(self):
"""
Run a full optimization and return the best positions and fitness values.
This method is designed to be used with Gradio.
"""
# Initialize the swarm
self.Initialize()
# Lists to store the best positions and fitness values at each step
best_positions = []
best_fitness = []
# Main loop
while not self.Done():
self.Step() # Perform an optimization step
# Update best_positions and best_fitness with the current best values
best_positions.append(self.gpos[-1])
best_fitness.append(self.gbest[-1])
# Convert the list of best positions to a NumPy array
best_positions_array = np.array(best_positions)
np.save('best_positions_array', best_positions_array)
# Print the best positions and fitness found
print("Best Positions:", best_positions)
print("Best Fitness:", best_fitness)
# Return the best positions and fitness after the optimization
return best_positions, best_fitness
def Step(self):
"""Do one swarm step"""
# a from eta ... zero (default eta is 2)
a = self.eta - self.eta*(self.iterations/self.max_iter)
# Update everyone
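# Standard GWO position update: for each leader X_L in {alpha, beta, delta} compute
# D_L = |C * X_L - X_i| and a candidate X_L - A * D_L, where A is drawn per dimension
# from [-a, a) and C from [0, 2); the wolf moves to the mean of the three candidates.
# As a decays linearly from eta to 0, exploration (|A| > 1) gives way to exploitation.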
for i in range(self.npart):
A = 2*a*np.random.random(self.ndim) - a
C = 2*np.random.random(self.ndim)
Dalpha = np.abs(C*self.alpha - self.pos[i])
X1 = self.alpha - A*Dalpha
A = 2*a*np.random.random(self.ndim) - a
C = 2*np.random.random(self.ndim)
Dbeta = np.abs(C*self.beta - self.pos[i])
X2 = self.beta - A*Dbeta
A = 2*a*np.random.random(self.ndim) - a
C = 2*np.random.random(self.ndim)
Ddelta = np.abs(C*self.delta - self.pos[i])
X3 = self.delta - A*Ddelta
self.pos[i,:] = (X1+X2+X3) / 3.0
# Keep in bounds
if self.bounds is not None:
self.pos = self.bounds.Limits(self.pos)
# Get objective function values and check for new leaders
for i in range(self.npart):
self.vpos[i] = self.obj.Evaluate(self.pos[i])
# new alpha?
if (self.vpos[i] < self.valpha):
self.vdelta = self.vbeta
self.delta = self.beta.copy()
self.vbeta = self.valpha
self.beta = self.alpha.copy()
self.valpha = self.vpos[i]
self.alpha = self.pos[i].copy()
# new beta?
if (self.vpos[i] > self.valpha) and (self.vpos[i] < self.vbeta):
self.vdelta = self.vbeta
self.delta = self.beta.copy()
self.vbeta = self.vpos[i]
self.beta = self.pos[i].copy()
# new delta?
if (self.vpos[i] > self.valpha) and (self.vpos[i] > self.vbeta) and (self.vpos[i] < self.vdelta):
self.vdelta = self.vpos[i]
self.delta = self.pos[i].copy()
# is alpha new swarm best?
if (self.valpha < self.gbest[-1]):
self.gidx.append(i)
self.gbest.append(self.valpha)
np.save('best_fitness.npy', np.array(self.gbest))
self.gpos.append(self.alpha.copy())
np.save('best_positions.npy', np.array(self.gpos))
# Record the swarm positions whenever a new swarm best is found
self.all_positions.append(self.pos.copy())
np.save('all_positions.npy', np.array(self.all_positions), allow_pickle=True)
self.giter.append(self.iterations)
self.iterations += 1
def Done(self):
"""Check if we are done"""
if self.done is None:
if self.tol is None:
return (self.iterations == self.max_iter)
else:
return (self.gbest[-1] < self.tol) or (self.iterations == self.max_iter)
else:
return self.done.Done(self.gbest,
gpos=self.gpos,
pos=self.pos,
max_iter=self.max_iter,
iteration=self.iterations)
def Evaluate(self, pos):
p = np.zeros(self.npart)
for i in range(self.npart):
p[i] = self.obj.Evaluate(pos[i])
return p
def Results(self):
if (not self.initialized):
return None
return {
"npart": self.npart,
"ndim": self.ndim,
"max_iter": self.max_iter,
"iterations": self.iterations,
"tol": self.tol,
"eta": self.eta,
"gbest": self.gbest,
"giter": self.giter,
"gpos": self.gpos,
"gidx": self.gidx,
"pos": self.pos,
"vpos": self.vpos
}
def plot_contour_and_wolves(self, wolf_positions):
# Use the best-positions history saved during optimization; this overrides the
# wolf_positions argument so the plot always reflects the on-disk record.
wolf_positions = np.load('best_positions.npy')
# Define the objective function
def objective_function(x, y):
return -5.0*np.exp(-0.5*((x+2.2)**2/0.4+(y-4.3)**2/0.4)) + -2.0*np.exp(-0.5*((x-2.2)**2/0.4+(y+4.3)**2/0.4))
# Determine the search space boundaries based on the wolf positions
x_min, x_max = wolf_positions[:, 0].min() - 1, wolf_positions[:, 0].max() + 1
y_min, y_max = wolf_positions[:, 1].min() - 1, wolf_positions[:, 1].max() + 1
# Generate a grid of points within the determined search space
x = np.linspace(x_min, x_max, 100)
y = np.linspace(y_min, y_max, 100)
X, Y = np.meshgrid(x, y)
# Evaluate the objective function on the grid
Z = objective_function(X, Y)
# Plot the contour map
plt.figure(figsize=(10, 8))
contour = plt.contour(X, Y, Z, levels=20, cmap='magma')
plt.colorbar(contour)
# Plot the wolf positions
plt.plot(wolf_positions[:, 0], wolf_positions[:, 1], 'ro', markersize=5, label='Wolves')
# Set plot title and labels
plt.title('Contour Map of Wolves Oscillating Over Search Space')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
# Set the x and y limits of the plot based on the determined search space
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
# Convert the plot to a PIL Image and return it
buf = BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
plt.close() # Close the figure to free up memory
return Image.open(buf)
def Dispersion(self):
"""Calculate the dispersion of the swarm"""
# Ensure self.gpos is a NumPy array
if not isinstance(self.gpos, np.ndarray):
self.gpos = np.array(self.gpos)
# Now self.gpos should be a NumPy array, so we can calculate the dispersion
x, y = self.gpos[:, 0], self.gpos[:, 1]
dx = x.max() - x.min()
dy = y.max() - y.min()
return (dx + dy) / 2.0
def plot_dispersion_heatmap(self, x_range, y_range, resolution=100):
# Create a grid of points within the specified range
x = np.linspace(*x_range, resolution)
y = np.linspace(*y_range, resolution)
X, Y = np.meshgrid(x, y)
positions = np.vstack([X.ravel(), Y.ravel()]).T
# Note: Dispersion() does not depend on the grid point, so this heatmap is uniform;
# the method is kept for completeness and is not wired into the Gradio outputs.
dispersion_values = np.array([self.Dispersion() for _ in positions])
Z = dispersion_values.reshape(X.shape)
# Plot the dispersion heatmap
plt.figure(figsize=(10, 8))
plt.pcolormesh(X, Y, Z, cmap='viridis')
plt.colorbar(label='Dispersion')
# Set plot title and labels
plt.title('Dispersion Heatmap')
plt.xlabel('x')
plt.ylabel('y')
# Convert the plot to a PIL Image and return it
buf = BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
plt.close() # Close the figure to free up memory
return Image.open(buf)
def plot_dispersion(self):
"""Plot the dispersion over time"""
# Note: Dispersion() is computed from the full history of recorded best positions,
# so this curve is flat across iterations; it mainly reports the final dispersion value.
dispersion_values = [self.Dispersion() for _ in range(self.max_iter)]
plt.figure(figsize=(10, 6))
plt.plot(range(self.max_iter), dispersion_values, label='Dispersion')
plt.xlabel('Iteration')
plt.ylabel('Dispersion')
plt.title('Evolution of Dispersion')
plt.legend()
# Convert the plot to a PIL Image and return it
buf = BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
plt.close() # Close the figure to free up memory
return Image.open(buf)
def optimize(npart, ndim, max_iter):
# Build a fresh initializer so the swarm size always matches the slider value
# (the module-level init was created with npart=50 and would otherwise be reused)
init_local = QuasiRandomInitializer(npart=npart, ndim=ndim, bounds=bounds)
# Initialize the GWO algorithm with the provided parameters
gwo = GWO(obj=obj, npart=npart, ndim=ndim, max_iter=max_iter, init=init_local, bounds=bounds)
best_positions, best_fitness= gwo.optimize()
# Convert best_fitness and best_positions to NumPy arrays if necessary
best_fitness_npy = np.array(best_fitness)
best_positions_npy = np.array(best_positions)
# Calculate dispersion
dispersion = gwo.Dispersion()
dispersion_text = f"Dispersion: {dispersion}"
# Load the best-positions history saved by gwo.optimize()
best_positions_array = np.load('best_positions_array.npy')
# Plot the contour_and_wolves
plot_contour_and_wolves = gwo.plot_contour_and_wolves(best_positions_array)
# Plot the dispersion over time
dispersion_plot = gwo.plot_dispersion()
# Plot the dispersion heatmap
#dispersion_heatmap_plot = gwo.plot_dispersion_heatmap(x_range=(-6,6), y_range=(-6,6))
# Format the output strings
best_fitness_text = f"Best Fitness: {best_fitness_npy}"
best_positions_text = f"Best Positions: {best_positions_npy}"
# Return the two plots and the three text values in the same order as the Gradio outputs below
return plot_contour_and_wolves, dispersion_plot, dispersion_text, best_positions_text, best_fitness_text
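# Optional standalone sketch (assuming only the classes defined above): a short GWO
# run outside the Gradio wrapper, with a small swarm and few iterations, just to
# exercise the optimizer end to end before the interface is built and launched.
demo_init = QuasiRandomInitializer(npart=20, ndim=2, bounds=bounds)
demo_gwo = GWO(obj=obj, npart=20, ndim=2, max_iter=30, init=demo_init, bounds=bounds)
demo_positions, demo_fitness = demo_gwo.optimize()
print(f"Standalone check -> best fitness: {demo_gwo.gbest[-1]}, best position: {demo_gwo.gpos[-1]}")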
# Define the Gradio interface
iface = gr.Interface(
fn=optimize, # Pass the optimize function object
inputs=[
gr.components.Slider(10, 50, 50, step=1, label="Number of Wolves [Default = 50 Wolves]"),
gr.components.Slider(2, 2, 2, step=1, label="Two-Dimensional Search Space"),
gr.components.Slider(100, 200, 200, step=1, label="Maximum Iterations [Default = 200 Epochs]"),
],
outputs=[
gr.components.Image(type="pil", label="Contour Map of Wolves Oscillating Over Search Space"),
gr.components.Image(type="pil", label="Dispersion Plot"),
#gr.components.Image(type="pil", label="Dispersion Heatmap Plot"),
gr.components.Textbox(label="Dispersion Values"),
gr.components.Textbox(label="Best Positions"),
gr.components.Textbox(label="Best Fitness")
],
title="Grey Wolf Optimizer",
description="Optimize the objective function using the Grey Wolf Optimizer.",
article="## Grey Wolf Optimizer\nThe Grey Wolf Optimizer (GWO) is a population-based metaheuristic optimization algorithm inspired by the social behavior of grey wolves in nature."
)
# Launch the Gradio interface
iface.launch()