huabdul committed
Commit
921471e
1 Parent(s): 9fee82f

Update app.py

Files changed (1):
  1. app.py +98 -70
app.py CHANGED
@@ -1,78 +1,101 @@
  import numpy as np
- import matplotlib.pyplot as plt
- from matplotlib.colors import ListedColormap
- plt.rcParams['figure.dpi'] = 100
- plt.style.use('ggplot')
+ import plotly.graph_objects as go

- from sklearn.model_selection import train_test_split
  from sklearn.preprocessing import StandardScaler
- from sklearn.datasets import make_moons, make_circles, make_classification
+ from sklearn.datasets import make_moons, make_circles, make_classification, make_blobs
  from sklearn.neural_network import MLPClassifier
- from sklearn.pipeline import make_pipeline

  import gradio as gr
- #==========================================================================
- C1, C2 = '#ff0000', '#0000ff'
- CMAP = ListedColormap([C1, C2])
- GRANULARITY = 0.01
+
+ # =========================================================================
+
+ GRANULARITY = 0.2
  MARGIN = 0.5
  N_SAMPLES = 150
- #==========================================================================
+ SEED = 1
+
+ datasets = {}
+ X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=SEED)
+ X = StandardScaler().fit_transform(X)
+ datasets["Moons"] = (X.copy(), y.copy())
+
+ X, y = make_circles(n_samples=N_SAMPLES, noise=0.2, factor=0.5, random_state=SEED)
+ X = StandardScaler().fit_transform(X)
+ datasets["Circles"] = (X.copy(), y.copy())
+
+ X, y = make_blobs(n_samples=N_SAMPLES, n_features=2, centers=4, cluster_std=2, random_state=SEED)
+ X = StandardScaler().fit_transform(X)
+ y[y==2] = 0
+ y[y==3] = 1
+ datasets["Blobs"] = (X.copy(), y.copy())
+
+ X, y = make_classification(n_samples=N_SAMPLES, n_features=2, n_redundant=0, n_informative=2, n_clusters_per_class=1, random_state=SEED)
+ X += 2 * np.random.uniform(size=X.shape)
+ X = StandardScaler().fit_transform(X)
+ datasets["Linear"] = (X.copy(), y.copy())
+
+ # =========================================================================
+
+ def get_figure_dict():
+     figure_dict = dict(data=[], layout={}, frames=[])
+
+     play_button = dict(args=[None, {"mode": "immediate", "fromcurrent": False, "frame": {"duration": 50}, "transition": {"duration": 50}}],
+                        label="Play",
+                        method="animate")
+
+     pause_button = dict(args=[[None], {"mode": "immediate"}],
+                         label="Stop",
+                         method="animate")
+
+     slider = dict(steps=[], active=0, currentvalue={"prefix": "Iteration: "})
+
+     figure_dict["layout"] = dict(width=600, height=600, hovermode=False, margin=dict(l=40, r=40, t=40, b=40),
+                                  title=dict(text="Decision Surface", x=0.5),
+                                  sliders=[slider],
+                                  updatemenus=[dict(buttons=[play_button, pause_button], direction="left", pad={"t": 85}, type="buttons", x=0.6, y=-0.05)]
+                                  )
+
+     return figure_dict
+
  def get_decision_surface(X, model):
      x_min, x_max = X[:, 0].min() - MARGIN, X[:, 0].max() + MARGIN
      y_min, y_max = X[:, 1].min() - MARGIN, X[:, 1].max() + MARGIN
      xrange = np.arange(x_min, x_max, GRANULARITY)
      yrange = np.arange(y_min, y_max, GRANULARITY)
-     xx, yy = np.meshgrid(xrange, yrange)
-
-     Z = model.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]
-     Z = Z.reshape(xx.shape)
-     return xx, yy, Z
-
- def create_plot(alpha, seed):
-     X, y = make_classification(
-         n_samples=N_SAMPLES, n_features=2, n_redundant=0, n_informative=2, random_state=seed, n_clusters_per_class=1
-     )
-     rng = np.random.RandomState(seed)
-     X += 2 * rng.uniform(size=X.shape)
-     linearly_separable = (X, y)
-     datasets = [
-         make_moons(n_samples=N_SAMPLES, noise=0.3, random_state=seed),
-         make_circles(n_samples=N_SAMPLES, noise=0.2, factor=0.5, random_state=seed),
-         linearly_separable
-     ]
-
-     model = make_pipeline(
-         StandardScaler(),
-         MLPClassifier(
-             solver="lbfgs",
-             alpha=alpha,
-             random_state=seed,
-             max_iter=2000,
-             early_stopping=True,
-             hidden_layer_sizes=[10, 10]))
-
-     fig = plt.figure(figsize=(7, 7))
-     for i, ds in enumerate(datasets):
-         X, y = ds
-         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=seed)
-         model.fit(X_train, y_train)
-
-         ax = fig.add_subplot(3, 2, 2*i+1)
-         ax.set_xticks(()); ax.set_yticks(())
-         ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=CMAP, edgecolor='k', s=40)
-         ax.set_xlim((X[:, 0].min() - MARGIN, X[:, 0].max() + MARGIN))
-         ax.set_ylim((X[:, 1].min() - MARGIN, X[:, 1].max() + MARGIN))
-         if i == 0: ax.set_title('Training Data')
-
-         ax = fig.add_subplot(3, 2, 2*i+2)
-         ax.set_xticks(()); ax.set_yticks(())
-         xx, yy, Z = get_decision_surface(X, model)
-         ax.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=0.65)
-         ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=CMAP, edgecolor='k', s=40, marker="X")
-         if i == 0: ax.set_title('Testing Data')
-
-     fig.set_tight_layout(True)
+     x, y = np.meshgrid(xrange, yrange)
+     x = x.ravel(); y = y.ravel()
+     z = model.predict_proba(np.column_stack([x, y]))[:, 1]
+     return x, y, z
+
+ # =========================================================================
+
+ def create_plot(dataset, alpha, h1, h2, seed):
+     X, y = datasets[dataset]
+
+     model = MLPClassifier(alpha=alpha, max_iter=2000, learning_rate_init=0.01, hidden_layer_sizes=[h1, h2], random_state=seed)
+
+     figure_dict = get_figure_dict()
+
+     model.partial_fit(X, y, classes=[0, 1])
+     xx, yy, zz = get_decision_surface(X, model)
+     figure_dict["data"] = [go.Contour(x=xx, y=yy, z=zz, opacity=0.6, showscale=False),
+                            go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers", marker_color=y, marker={"colorscale": "jet", "size": 8})]
+
+     prev_loss = np.inf
+     tol = 3e-4
+     for i in range(100):
+         for _ in range(3):
+             model.partial_fit(X, y, classes=[0, 1])
+
+         if prev_loss - model.loss_ <= tol: break
+         prev_loss = model.loss_
+
+         xx, yy, zz = get_decision_surface(X, model)
+         figure_dict["frames"].append({"data": [go.Contour(x=xx, y=yy, z=zz, opacity=0.6, showscale=False)], "name": i})
+
+         slider_step = {"args": [[i], {"mode": "immediate"}], "method": "animate", "label": i}
+         figure_dict["layout"]["sliders"][0]["steps"].append(slider_step)
+
+     fig = go.Figure(figure_dict)
      return fig

  info = '''
@@ -82,7 +105,7 @@ This example demonstrates the effect of varying the regularization parameter (al

  Higher values of alpha encourage smaller weights, thus making the model less prone to overfitting, while lower values may help against underfitting. Use the slider below to control the amount of regularization and observe how the decision surface changes with higher values.

- The color of the decision surface represents the probability of observing the class. Darker colors mean higher probability and thus higher confidence, and vice versa.
+ The neural network is trained until the decrease in loss falls below a set tolerance. The color of the decision surface represents the probability of observing the corresponding class.

  Created by [@huabdul](https://huggingface.co/huabdul) based on [scikit-learn docs](https://scikit-learn.org/stable/auto_examples/neural_networks/plot_mlp_alpha.html).
  '''
@@ -90,14 +113,19 @@ with gr.Blocks(analytics_enabled=False) as demo:
      with gr.Row():
          with gr.Column():
              gr.Markdown(info)
-             s_alpha = gr.Slider(0, 4, value=0.1, step=0.05, label='Alpha (regularization parameter)')
-             s_seed = gr.Slider(1, 5000, value=1, step=1, label='Random seed')
+             dd_dataset = gr.Dropdown(list(datasets.keys()), value="Moons", label="Dataset", interactive=True)
+             with gr.Row():
+                 with gr.Column(min_width=100):
+                     s_alpha = gr.Slider(0, 4, value=0.1, step=0.05, label="α (regularization parameter)")
+                     s_seed = gr.Slider(1, 1000, value=1, step=1, label="Seed")
+                 with gr.Column(min_width=100):
+                     s_h1 = gr.Slider(2, 20, value=10, step=1, label="Hidden layer 1 size")
+                     s_h2 = gr.Slider(2, 20, value=10, step=1, label="Hidden layer 2 size")
+             submit = gr.Button("Submit")
          with gr.Column():
              plot = gr.Plot(show_label=False)

-     s_alpha.change(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])
-     s_seed.change(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])
-     demo.load(create_plot, inputs=[s_alpha, s_seed], outputs=[plot])
+     submit.click(create_plot, inputs=[dd_dataset, s_alpha, s_h1, s_h2, s_seed], outputs=[plot])
+     demo.load(create_plot, inputs=[dd_dataset, s_alpha, s_h1, s_h2, s_seed], outputs=[plot])

- demo.launch()
- #==========================================================================
+ demo.launch()
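A few notes on techniques this commit introduces. First, the dataset setup: make_blobs draws four Gaussian clusters, and the relabeling y[y==2] = 0, y[y==3] = 1 folds them into two classes of two clusters each, so the "Blobs" problem is not linearly separable. A standalone sketch of that relabeling (constants copied from app.py):

import numpy as np
from sklearn.datasets import make_blobs

# Four clusters, then merged pairwise into two classes (as in app.py).
X, y = make_blobs(n_samples=150, n_features=2, centers=4, cluster_std=2, random_state=1)
y[y == 2] = 0  # cluster 2 joins class 0
y[y == 3] = 1  # cluster 3 joins class 1
print(np.bincount(y))  # two roughly balanced classes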
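Second, the Plotly animation: get_figure_dict wires a slider and Play/Stop buttons to a list of frames, and create_plot appends one frame per recorded training step. The same pattern in miniature, with a moving marker instead of a contour (names and data here are illustrative, not from app.py):

import plotly.graph_objects as go

fig_dict = dict(data=[go.Scatter(x=[0], y=[0], mode="markers")], layout={}, frames=[])
steps = []
for i in range(10):
    # Each frame redraws the data; its name ties it to a slider step.
    fig_dict["frames"].append({"data": [go.Scatter(x=[i], y=[i], mode="markers")], "name": str(i)})
    steps.append({"args": [[str(i)], {"mode": "immediate"}], "label": str(i), "method": "animate"})

fig_dict["layout"] = dict(
    xaxis=dict(range=[-1, 10]), yaxis=dict(range=[-1, 10]),
    sliders=[dict(steps=steps, active=0, currentvalue={"prefix": "Step: "})],
    updatemenus=[dict(type="buttons", buttons=[
        dict(label="Play", method="animate",
             args=[None, {"mode": "immediate", "frame": {"duration": 50}}]),
        dict(label="Stop", method="animate", args=[[None], {"mode": "immediate"}])])])

go.Figure(fig_dict).show()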
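Third, the decision surface: get_decision_surface evaluates the model's class-1 probability on a grid and passes go.Contour flat, equal-length x/y/z arrays, so the reshape step from the matplotlib version disappears; the much coarser GRANULARITY (0.2 vs. 0.01) presumably keeps each of the up-to-100 animation frames light, with the contour trace smoothing between grid points. A minimal standalone rendering of one such surface (dataset and model choices mirror app.py; this sketch is not part of the commit):

import numpy as np
import plotly.graph_objects as go
from sklearn.datasets import make_moons
from sklearn.neural_network import MLPClassifier

X, y = make_moons(n_samples=150, noise=0.2, random_state=1)
model = MLPClassifier(hidden_layer_sizes=[10, 10], max_iter=2000, random_state=1).fit(X, y)

# Flat grid coordinates; go.Contour accepts equal-length 1-D x, y, z.
xx, yy = np.meshgrid(np.arange(X[:, 0].min() - 0.5, X[:, 0].max() + 0.5, 0.2),
                     np.arange(X[:, 1].min() - 0.5, X[:, 1].max() + 0.5, 0.2))
zz = model.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]
fig = go.Figure([go.Contour(x=xx.ravel(), y=yy.ravel(), z=zz, opacity=0.6, showscale=False),
                 go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers", marker_color=y)])
fig.show()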
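Finally, the training loop: each call to MLPClassifier.partial_fit performs a single update pass over the given data (hence three calls per recorded frame), and the current training loss is exposed as model.loss_, which is what the stopping rule reads. The rule in isolation (tolerance copied from app.py, dataset illustrative):

import numpy as np
from sklearn.datasets import make_moons
from sklearn.neural_network import MLPClassifier

X, y = make_moons(n_samples=150, noise=0.2, random_state=1)
model = MLPClassifier(hidden_layer_sizes=[10, 10], learning_rate_init=0.01, random_state=1)

prev_loss, tol = np.inf, 3e-4
for i in range(100):
    for _ in range(3):  # a few updates per recorded animation frame
        model.partial_fit(X, y, classes=[0, 1])
    if prev_loss - model.loss_ <= tol:  # stop once improvement stalls
        break
    prev_loss = model.loss_
print(f"stopped after {i + 1} outer steps, loss={model.loss_:.4f}")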