vumichien committed on
Commit ef72632
1 Parent(s): c301f1d

Create app.py

Files changed (1)
  1. app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import LinearSVC

theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
)
model_card = """
## Description

**Univariate feature selection** can be used to improve classification accuracy on a noisy dataset.
In **univariate feature selection**, each feature is evaluated independently, and a statistical test is used to measure the strength of its association with the target variable.
The most important features are then selected based on their statistical significance, typically using a p-value threshold or a predefined number of top features to keep.

In this demo, some noisy (non-informative) features are added to the Iris dataset, and a **Support Vector Machine (SVM)** is then trained to classify the dataset both before and after applying univariate feature selection.
The results of the feature selection are presented through the p-values and the SVM weights, which are plotted for comparison.
The objective of this demo is to evaluate the accuracy of the models and assess the impact of univariate feature selection on the model weights.
You can play around with different ``number of top features`` and ``random seed`` values.

## Dataset

Iris dataset
"""
# The Iris dataset
X, y = load_iris(return_X_y=True)

# Some noisy features, uncorrelated with the target
E = np.random.RandomState(42).uniform(0, 0.1, size=(X.shape[0], 20))

# Append the noisy features to the informative ones
X = np.hstack((X, E))
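# X now has 24 columns: the 4 informative Iris measurements followed by the
# 20 uninformative noise features.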


def do_train(k_features, random_state):
    # Split the dataset to select features and to evaluate the classifier
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, random_state=int(random_state)
    )
    # Cast slider values to int: Gradio sliders may deliver floats
    selector = SelectKBest(f_classif, k=int(k_features))
    selector.fit(X_train, y_train)
    scores = -np.log10(selector.pvalues_)
    scores /= scores.max()
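    # `scores` now holds normalized -log10 p-values: taller bars indicate stronger
    # evidence that a feature is associated with the class label.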

    fig1, axes1 = plt.subplots()
    X_indices = np.arange(X.shape[-1])
    axes1.bar(X_indices - 0.05, scores, width=0.2)
    axes1.set_title("Feature univariate score")
    axes1.set_xlabel("Feature number")
    axes1.set_ylabel(r"Univariate score ($-Log(p_{value})$)")

    # SVM trained on all features (informative + noise)
    clf = make_pipeline(MinMaxScaler(), LinearSVC())
    clf.fit(X_train, y_train)

    svm_weights = np.abs(clf[-1].coef_).sum(axis=0)
    svm_weights /= svm_weights.sum()

    # SVM trained on the k selected features only
    clf_selected = make_pipeline(
        SelectKBest(f_classif, k=int(k_features)), MinMaxScaler(), LinearSVC()
    )
    clf_selected.fit(X_train, y_train)

    svm_weights_selected = np.abs(clf_selected[-1].coef_).sum(axis=0)
    svm_weights_selected /= svm_weights_selected.sum()
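    # Summing |coef_| over the one-vs-rest classifiers (then normalizing to sum
    # to 1) gives per-feature importance weights comparable across both models.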

    fig2, axes2 = plt.subplots()
    axes2.bar(
        X_indices - 0.45, scores, width=0.2, label=r"Univariate score ($-Log(p_{value})$)"
    )
    axes2.bar(X_indices - 0.25, svm_weights, width=0.2, label="SVM weight")
    axes2.bar(
        X_indices[selector.get_support()] - 0.05,
        svm_weights_selected,
        width=0.2,
        label="SVM weights after selection",
    )
    axes2.set_title("Comparing feature selection")
    axes2.set_xlabel("Feature number")
    axes2.set_yticks(())
    axes2.axis("tight")
    axes2.legend(loc="upper right")
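    # Selected features keep their original x positions via selector.get_support(),
    # making it visible how selection concentrates the SVM weight on the
    # informative features.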

    text = (
        f"Classification accuracy without feature selection: {clf.score(X_test, y_test) * 100:.2f}%. "
        f"Classification accuracy after univariate feature selection: {clf_selected.score(X_test, y_test) * 100:.2f}%."
    )

    return fig1, fig2, text


with gr.Blocks(theme=theme) as demo:
    gr.Markdown('''
        <div>
            <h1 style='text-align: center'>Univariate Feature Selection</h1>
        </div>
    ''')
    gr.Markdown(model_card)
    gr.Markdown(
        'Author: <a href="https://huggingface.co/vumichien">Vu Minh Chien</a>. '
        'Based on the example from <a href="https://scikit-learn.org/stable/auto_examples/feature_selection/plot_feature_selection.html#sphx-glr-auto-examples-feature-selection-plot-feature-selection-py">scikit-learn</a>.'
    )
    k_features = gr.Slider(minimum=2, maximum=10, step=1, value=2, label="Number of top features to select")
    random_state = gr.Slider(minimum=0, maximum=2000, step=1, value=0, label="Random seed")
    with gr.Row():
        with gr.Column():
            plot_1 = gr.Plot(label="Univariate score")
        with gr.Column():
            plot_2 = gr.Plot(label="Comparing feature selection")
    with gr.Row():
        results = gr.Textbox(label="Results")

    # Retrain and redraw whenever either control changes
    k_features.change(fn=do_train, inputs=[k_features, random_state], outputs=[plot_1, plot_2, results])
    random_state.change(fn=do_train, inputs=[k_features, random_state], outputs=[plot_1, plot_2, results])

demo.launch()
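
# Note: both plots stay empty until a slider is moved. A possible tweak, assuming
# Gradio's standard Blocks `load` event, is to add inside the `with` block above:
#   demo.load(fn=do_train, inputs=[k_features, random_state], outputs=[plot_1, plot_2, results])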