"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.

Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that accounts for the most variance in the data. Here we
plot the different samples on the first 2 principal components.

Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular, LDA,
in contrast to PCA, is a supervised method that uses the known class labels.
"""
import matplotlib.pyplot as plt
import gradio as gr
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
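# X has shape (150, 4): 150 samples with the 4 measurements above;
# y holds the class labels 0-2 for the 3 species.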
# fit PCA
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
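# Note: pca.fit(X).transform(X) is equivalent here to the more concise
# pca.fit_transform(X); both project the samples onto the first two
# principal components.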
# fit LDA
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
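# scikit-learn caps LDA at n_classes - 1 discriminant axes, so with the
# 3 iris classes n_components=2 is the maximum possible here.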
# Percentage of variance explained by each of the selected components
print(
    "explained variance ratio (first two components): %s"
    % str(pca.explained_variance_ratio_)
)
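# For iris, the first two principal components typically explain roughly
# 92% and 5% of the variance, i.e. about 98% combined.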
# Optionally persist the fitted models with skops. This is a sketch, not part
# of the original demo; it assumes the optional `skops` package, which offers
# secure persistence for sklearn estimators.
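try:
    from skops.io import dump  # skops' safer alternative to pickle

    dump(pca, "pca.skops")
    dump(lda, "lda.skops")
except ImportError:
    pass  # saving is optional; skip it if skops is not installed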

def plot_lda_pca():
    fig, axes = plt.subplots(2, 1, sharey=False, sharex=False, figsize=(8, 6))
    colors = ["navy", "turquoise", "darkorange"]
    lw = 2
    # top panel: PCA projection, one color per class
    for color, i, target_name in zip(colors, [0, 1, 2], target_names):
        axes[0].scatter(
            X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=lw, label=target_name
        )
    axes[0].legend(loc="lower right")
    axes[0].set_title("PCA of IRIS dataset")
    # bottom panel: LDA projection, one color per class
    for color, i, target_name in zip(colors, [0, 1, 2], target_names):
        axes[1].scatter(
            X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color, label=target_name
        )
    axes[1].legend(loc="lower right")
    axes[1].set_title("LDA of IRIS dataset")
    plt.tight_layout()
    return fig
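
# Gradio UI: a markdown description plus a button that renders the figure.
# plot_lda_pca returns the Figure (rather than calling plt.show()) so that
# gr.Plot can display it in the browser.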
title = "2-D projection of Iris dataset using LDA and PCA"

with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(
        "This example shows how one can use Principal Component Analysis (PCA) "
        "and Linear Discriminant Analysis (LDA) to project the Iris dataset "
        "onto two dimensions. <br>"
        "PCA applied to this data identifies the combination of attributes "
        "(principal components, or directions in the feature space) that "
        "accounts for the most variance in the data. Here we plot the "
        "different samples on the first 2 principal components. <br>"
        "<br>"
        "For further details please see the sklearn docs:"
    )
    gr.Markdown(
        "**[Demo is based on the sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>"
    )
    gr.Markdown(
        "**Dataset**: The Iris dataset represents 3 kinds of Iris flowers "
        "(Setosa, Versicolour and Virginica) with 4 attributes: sepal length, "
        "sepal width, petal length and petal width. <br>"
    )
    btn = gr.Button(value="Run")
    btn.click(plot_lda_pca, outputs=gr.Plot(label="PCA vs LDA projection"))

demo.launch()
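# demo.launch() starts a local Gradio server (http://127.0.0.1:7860 by default);
# on Hugging Face Spaces the same call serves the hosted app.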