Update app.py
app.py
CHANGED
@@ -0,0 +1,103 @@
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
plt.rcParams['figure.dpi'] = 100
plt.style.use('ggplot')

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline

import gradio as gr
#==========================================================================
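# Plot constants: C1/C2 are the two class colors, GRANULARITY is the mesh
# step used for the decision surface, MARGIN pads the axis limits around
# the data, and N_SAMPLES is the size of each toy dataset.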
C1, C2 = '#ff0000', '#0000ff'
CMAP = ListedColormap([C1, C2])
GRANULARITY = 0.01
MARGIN = 0.5
N_SAMPLES = 150
#==========================================================================
def get_decision_surface(X, model):
    x_min, x_max = X[:, 0].min() - MARGIN, X[:, 0].max() + MARGIN
    y_min, y_max = X[:, 1].min() - MARGIN, X[:, 1].max() + MARGIN
    xrange = np.arange(x_min, x_max, GRANULARITY)
    yrange = np.arange(y_min, y_max, GRANULARITY)
    xx, yy = np.meshgrid(xrange, yrange)

    Z = model.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]
    Z = Z.reshape(xx.shape)
    return xx, yy, Z

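# A minimal usage sketch (not part of the app): get_decision_surface works
# with any fitted classifier that exposes predict_proba, e.g.:
#
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.datasets import make_blobs
#     X_demo, y_demo = make_blobs(n_samples=50, centers=2, random_state=0)
#     clf = LogisticRegression().fit(X_demo, y_demo)
#     xx, yy, Z = get_decision_surface(X_demo, clf)
#     plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)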
def create_plot(alpha, seed):
    X, y = make_classification(
        n_samples=N_SAMPLES, n_features=2, n_redundant=0, n_informative=2, random_state=seed, n_clusters_per_class=1
    )
    rng = np.random.RandomState(seed)
    X += 2 * rng.uniform(size=X.shape)
    linearly_separable = (X, y)
    datasets = [
        make_moons(n_samples=N_SAMPLES, noise=0.3, random_state=seed),
        make_circles(n_samples=N_SAMPLES, noise=0.2, factor=0.5, random_state=seed),
        linearly_separable
    ]

    model = make_pipeline(
        StandardScaler(),
        MLPClassifier(
            solver="lbfgs",
            alpha=alpha,
            random_state=seed,
            max_iter=2000,
            early_stopping=True,
            hidden_layer_sizes=[10, 10]))

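    # Note on alpha: for scikit-learn's MLPClassifier, alpha is the strength
    # of the L2 penalty on the weights (roughly loss + alpha * ||W||^2 / 2,
    # scaled by the sample size). Larger alpha shrinks the weights and smooths
    # the decision surface; smaller alpha lets the network fit finer detail.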
    fig = plt.figure(figsize=(7, 7))
    for i, ds in enumerate(datasets):
        X, y = ds
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=seed)
        model.fit(X_train, y_train)

        ax = fig.add_subplot(3, 2, 2*i+1)
        ax.set_xticks(()); ax.set_yticks(())
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=CMAP, edgecolor='k', s=40)
        ax.set_xlim((X[:, 0].min() - MARGIN, X[:, 0].max() + MARGIN))
        ax.set_ylim((X[:, 1].min() - MARGIN, X[:, 1].max() + MARGIN))
        if i == 0: ax.set_title('Training Data')

        ax = fig.add_subplot(3, 2, 2*i+2)
        ax.set_xticks(()); ax.set_yticks(())
        xx, yy, Z = get_decision_surface(X, model)
        ax.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=0.65)
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=CMAP, edgecolor='k', s=40, marker="X")
        if i == 0: ax.set_title('Testing Data')

    fig.set_tight_layout(True)
    return fig

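# A minimal sketch of calling create_plot directly, outside the Gradio UI
# (the filename is illustrative):
#
#     fig = create_plot(alpha=0.5, seed=42)
#     fig.savefig('decision_surfaces.png')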
info = '''
# Effect of the Regularization Parameter of a Multilayer Perceptron

This example demonstrates the effect of varying the regularization parameter (alpha) of a multilayer perceptron on the binary classification of toy datasets, as represented by the decision surface of the classifier.

Higher values of alpha encourage smaller weights, making the model less prone to overfitting, while lower values may help against underfitting. Use the slider below to control the amount of regularization and observe how the decision surface changes.

The color of the decision surface represents the predicted probability of the corresponding class. Darker colors mean higher probability and thus higher confidence, and vice versa.

Created by [@huabdul](https://huggingface.co/huabdul) based on the [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/neural_networks/plot_mlp_alpha.html).
'''
with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(info)
            s_alpha = gr.Slider(0, 4, value=0.1, step=0.05, label='Alpha (regularization parameter)')
            s_seed = gr.Slider(1, 5000, value=1, step=1, label='Random seed')
        with gr.Column():
            plot = gr.Plot(show_label=False)

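    # Re-render the figure whenever either slider changes, and once on page
    # load so the plot is populated with the sliders' initial values.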
    s_alpha.change(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])
    s_seed.change(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])
    demo.load(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])

demo.launch()
#==========================================================================
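# To run locally (assumed dependencies; this diff does not pin them):
#     pip install gradio scikit-learn matplotlib numpy
#     python app.py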