import numpy as np
import matplotlib.pyplot as plt
import gradio as gr
def modified_huber_loss(y_true, y_pred):
    """Modified Huber loss: a quadratically smoothed hinge loss that grows
    linearly for margins z < -1, making it more tolerant to outliers."""
    z = y_pred * y_true
    loss = -4 * z                          # linear branch for z < -1
    loss[z >= -1] = (1 - z[z >= -1]) ** 2  # quadratic branch for -1 <= z < 1
    loss[z >= 1.0] = 0                     # zero loss beyond the margin
    return loss
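
# Sanity check (hypothetical values, assuming the label argument is 1):
# modified_huber_loss(np.array([-2.0, 0.5, 2.0]), 1) -> array([8., 0.25, 0.])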
def plot_loss_func():
    xmin, xmax = -4, 4
    xx = np.linspace(xmin, xmax, 100)  # margin values z = y * f(x), plotted for y = 1
    lw = 2
    fig = plt.figure(figsize=(10, 10), dpi=100)  # fresh figure, so plt.clf() is not needed
    plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color="gold", lw=lw, label="Zero-one loss")
    plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color="teal", lw=lw, label="Hinge loss")
    plt.plot(xx, -np.minimum(xx, 0), color="yellowgreen", lw=lw, label="Perceptron loss")
    plt.plot(xx, np.log2(1 + np.exp(-xx)), color="cornflowerblue", lw=lw, label="Log loss")
    plt.plot(
        xx,
        np.where(xx < 1, 1 - xx, 0) ** 2,
        color="orange",
        lw=lw,
        label="Squared hinge loss",
    )
    plt.plot(
        xx,
        modified_huber_loss(xx, 1),
        color="darkorchid",
        lw=lw,
        linestyle="--",
        label="Modified Huber loss",
    )
    plt.ylim((0, 8))
    plt.legend(loc="upper right")
    plt.xlabel(r"Decision function $f(x)$")
    plt.ylabel("$L(y=1, f(x))$")
    return fig
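
# For reference, the curves above are closed-form functions of the margin
# z = y * f(x) with y = 1, exactly as coded in plot_loss_func:
#   zero-one:       1 if z <= 0 else 0
#   hinge:          max(0, 1 - z)
#   perceptron:     max(0, -z)
#   log loss:       log2(1 + exp(-z))
#   squared hinge:  max(0, 1 - z) ** 2
#   modified Huber: (1 - z) ** 2 for z >= -1, else -4 * z (and 0 once z >= 1)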
title = "SGD convex loss functions"
detail = "This plot shows the convex loss functions supported by SGDClassifier (linear classifiers such as SVM and logistic regression trained with SGD)."
def explain(name):
    if name == "0-1 loss":
        docstr = "Explanation for " + name + ": " +\
            " This is the simplest loss function used in classification problems. It counts how many mistakes a hypothesis makes on a training set: " +\
            " a loss of 1 is counted for each misprediction and a loss of 0 for each correct prediction. " +\
            " The function is non-differentiable and hence not used directly as an optimization objective. "
elif name == "Hinge loss":
docstr = "Explanation for " + name + ": " +\
" This is the loss function used in maximum-margin classification in SVMs. "+\
" Z_i = y_i*(w.T * x_i + b), if Z_i > 0 the point x_i is correctly classified and Z_i < 0 , x_i is incorrectly classified "+\
" Z_i >= 1, hinge loss =0 , Z_i < 1 , hinge loss = 1- Z_i "
elif name == "Perceptron loss":
docstr = "Explanation for " + name + ": " +\
" This is the linear loss function used in perceptron algorithm. "+\
" The binary classifier function which decides whether the input represented by vector of numbers belongs to a class or not. "
elif name == "Squared Hinge loss":
docstr = "Explanation for " + name + ":" +\
" This represents the square verison of Hinge loss and used in classification algorithms where Performance is important. "+\
" If we want a more fine decision boundary where we want to punish larger errors more significantly than the smaller errors. "
elif name == "Modified Huber loss":
docstr = "Explanation for " + name + ":" +\
" The Huber loss function balances the best of both Mean Squared Error and Mean Absolute Error. "+\
" Its a piecewise function and hyper parameter delta is to be found first and then loss optimization step."
    else:
        docstr = " Logistic loss (log loss) is the loss function used in logistic regression; " +\
            " see the scikit-learn documentation or Wikipedia for the equation. " +\
            " L2 regularization is commonly applied to logistic regression models. "
    return docstr
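
# Hypothetical standalone usage (the app wires this function to the dropdown below):
# print(explain("Hinge loss"))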
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(detail)
    gr.Markdown(" **[Demo is based on sklearn docs](https://scikit-learn.org/stable/auto_examples/linear_model/plot_sgd_loss_functions.html#sphx-glr-auto-examples-linear-model-plot-sgd-loss-functions-py)**")
    with gr.Column(variant="panel"):
        btn = gr.Button(value="SGD convex loss functions")
        plot = gr.Plot()
        btn.click(plot_loss_func, outputs=plot)
        dd = gr.Dropdown(
            ["0-1 loss", "Hinge loss", "Perceptron loss", "Squared Hinge loss", "Modified Huber loss", "Log Loss"],
            label="loss",
            info="Select a loss from the dropdown for a detailed explanation",
        )
        out = gr.Textbox(label="explanation of the loss function")
        dd.change(explain, dd, out)
demo.launch()
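
# To run locally (assumed entry point): `python app.py`, then open the URL that
# Gradio prints. On Hugging Face Spaces, app.py is launched automatically.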