NTaylor committed
Commit cd8213d · 1 Parent(s): bebe690

Cleaning up

Files changed (1)
1. app.py +22 -8
app.py CHANGED
@@ -36,9 +36,22 @@ target_names = iris.target_names
 
 
 def plot_lda_pca(n_samples = 50, n_features = 4):
+
+    '''
+    Function to plot LDA and PCA of the Iris dataset.
+
+    Parameters
+    ----------
+    n_samples : int, optional
+        Number of samples to use from the dataset. The default is 50.
+    n_features : int, optional
+        Number of features to use from the dataset. The default is 4.
+
+    '''
 
     # print(f"all X is: {all_X}")
 
+
     idx = np.random.randint(0, len(iris.data), n_samples)
     # sub-sample
     X = all_X[idx, :n_features]
@@ -85,25 +98,26 @@ def plot_lda_pca(n_samples = 50, n_features = 4):
 title = "2-D projection of Iris dataset using LDA and PCA"
 with gr.Blocks(title=title) as demo:
     gr.Markdown(f"# {title}")
-    gr.Markdown(" This example shows how one can use Prinicipal Components Analysis (PCA) and Factor Analysis (FA) for model selection by observing the likelihood of a held-out dataset with added noise <br>"
+    gr.Markdown(" This example shows how one can use Principal Components Analysis (PCA) and Linear Discriminant Analysis (LDA) to cluster the Iris dataset based on the provided features. <br>"
+                " PCA applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the first 2 principal components. <br>"
+                " LDA is a supervised method that tries to identify attributes that account for the most variance between classes, using the known class labels. <br>"
                 " The number of samples (n_samples) will determine the number of data points to produce. <br>"
-                " The number of components (n_components) will determine the number of components each method will fit to, and will affect the likelihood of the held-out set. <br>"
-                " The number of features (n_components) determine the number of features the toy dataset X variable will have. <br>"
+                " The number of components is fixed to 2 for this 2-D visualisation; LDA requires the number of components to be at most the number of classes - 1, which in this case is (3 - 1) = 2. <br>"
+                " The number of features (n_features) determines the number of features from the Iris dataset to use for the model fitting. <br>"
                 " For further details please see the sklearn docs:"
                 )
 
     gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>")
 
-    gr.Markdown(" **Dataset** : A toy dataset with corrupted with homoscedastic noise (noise variance is the same for each feature) or heteroscedastic noise (noise variance is the different for each feature) . <br>")
-    gr.Markdown(" Different number of features and number of components affect how well the low rank space is recovered. <br>"
-                " Larger Depth trying to overfit and learn even the finner details of the data.<br>"
-                )
+    gr.Markdown(" **Dataset** : The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes or features: sepal length, sepal width, petal length and petal width. <br>")
 
+    # get max possible samples and features
     max_samples = len(iris.data)
+    max_features = iris.data.shape[1]
     with gr.Row():
         n_samples = gr.Slider(value=100, minimum=10, maximum=max_samples, step=10, label="n_samples")
 
-        n_features = gr.Slider(value=2, minimum=2, maximum=4, step=1, label="n_features")
+        n_features = gr.Slider(value=2, minimum=2, maximum=max_features, step=1, label="n_features")
 
     btn = gr.Button(value="Run")
     btn.click(plot_lda_pca, inputs = [n_samples, n_features], outputs= gr.Plot(label='PCA vs LDA clustering') ) #
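
For reference, the projection logic the updated description walks through can be sketched outside the app as follows. This is a minimal reconstruction, not the actual plot_lda_pca body from app.py: it assumes scikit-learn and matplotlib, reuses the diff's random sub-sampling, and keeps n_components=2 (the maximum LDA allows for Iris's 3 classes).

```python
# Minimal sketch of the PCA-vs-LDA projection the demo describes.
# Assumed reconstruction; not the exact plot_lda_pca from app.py.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

iris = datasets.load_iris()

def plot_lda_pca(n_samples=50, n_features=4):
    # Random sub-sample, as in the diff: row indices, then first n_features columns.
    idx = np.random.randint(0, len(iris.data), n_samples)
    X = iris.data[idx, :n_features]
    y = iris.target[idx]

    # PCA is unsupervised: directions of maximum variance in X.
    X_pca = PCA(n_components=2).fit_transform(X)
    # LDA is supervised: n_components must be <= n_classes - 1 = 2 for Iris.
    X_lda = LinearDiscriminantAnalysis(n_components=2).fit_transform(X, y)

    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for ax, Z, name in ((axes[0], X_pca, "PCA"), (axes[1], X_lda, "LDA")):
        for label, target_name in enumerate(iris.target_names):
            ax.scatter(Z[y == label, 0], Z[y == label, 1], label=target_name, alpha=0.8)
        ax.set_title(name)
        ax.legend()
    return fig
```

Note that with a very small n_samples the random sub-sample can occasionally miss a class, which would break the LDA fit; the slider's minimum of 10 makes this unlikely but not impossible.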
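The UI wiring at the bottom of the hunk follows the standard gr.Blocks pattern. A minimal stand-alone sketch, assuming the plot_lda_pca function above and the same slider bounds as the diff:

```python
# Minimal sketch of the Gradio wiring shown in the diff.
# Assumes `iris` and `plot_lda_pca` from the sketch above.
import gradio as gr

max_samples = len(iris.data)        # 150 rows in Iris
max_features = iris.data.shape[1]   # 4 features in Iris

title = "2-D projection of Iris dataset using LDA and PCA"
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    with gr.Row():
        n_samples = gr.Slider(value=100, minimum=10, maximum=max_samples, step=10, label="n_samples")
        n_features = gr.Slider(value=2, minimum=2, maximum=max_features, step=1, label="n_features")
    btn = gr.Button(value="Run")
    btn.click(plot_lda_pca, inputs=[n_samples, n_features], outputs=gr.Plot(label="PCA vs LDA clustering"))

demo.launch()
```

Deriving the slider maximum from iris.data.shape[1] rather than hard-coding 4, as the commit does, keeps the UI consistent with the dataset.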