saifsunny committed
Commit 268d97f · 1 Parent(s): 5b14f9b

Update app.py

Files changed (1):
  app.py +93 -1
app.py CHANGED
@@ -136,6 +136,98 @@ if st.button('Submit'):
     st.write('Ensemble Model Recall:', ensemble_recall)
     st.write('Ensemble Model F1 Score:', ensemble_f1score)
     st.write('------------------------------------------------------------------------------------------------------')
-
+
+    # Right column content
+    with right_column:
+        for model in models_to_run:
+            # Train the selected model
+            model.fit(X_train, y_train)
+
+            # Make predictions on the user's input
+            model_predictions = model.predict(user_input)
+            model_prob = model.predict_proba(user_input)[:, 1]
+
+            # Evaluate the model's performance on the test set
+            test_predictions = model.predict(X_test)
+            model_accuracy = accuracy_score(y_test, test_predictions)
+            model_precision = precision_score(y_test, test_predictions)
+            model_recall = recall_score(y_test, test_predictions)
+            model_f1score = f1_score(y_test, test_predictions)
+
+            if model_predictions[0] == 1:
+                st.write(f'According to the {type(model).__name__} model, you have a **Very High Chance (1)** of Diabetes.')
+                st.write('Diabetes Probability:', model_prob[0] * 100)
+            else:
+                st.write(f'According to the {type(model).__name__} model, you have a **Very Low Chance (0)** of Diabetes.')
+                st.write('Diabetes Probability:', model_prob[0] * 100)
+
+            st.write(f'{type(model).__name__} Accuracy:', model_accuracy)
+            st.write(f'{type(model).__name__} Precision:', model_precision)
+            st.write(f'{type(model).__name__} Recall:', model_recall)
+            st.write(f'{type(model).__name__} F1 Score:', model_f1score)
+            st.write('------------------------------------------------------------------------------------------------------')
+
+    # Initialize lists to store model names and their respective performance metrics
+    model_names = ['Ensemble']
+    accuracies = [ensemble_accuracy]
+    precisions = [ensemble_precision]
+    recalls = [ensemble_recall]
+    f1_scores = [ensemble_f1score]
+
+    # Loop through the selected models to compute their performance metrics
+    for model in models_to_run:
+        model_names.append(type(model).__name__)
+        model.fit(X_train, y_train)
+        model_predictions = model.predict(X_test)
+        accuracies.append(accuracy_score(y_test, model_predictions))
+        precisions.append(precision_score(y_test, model_predictions))
+        recalls.append(recall_score(y_test, model_predictions))
+        f1_scores.append(f1_score(y_test, model_predictions))
+
+    # Create a DataFrame to store the performance metrics
+    metrics_df = pd.DataFrame({
+        'Model': model_names,
+        'Accuracy': accuracies,
+        'Precision': precisions,
+        'Recall': recalls,
+        'F1 Score': f1_scores
+    })
+
+    # Get the model labels
+    model_labels = generate_model_labels(metrics_df['Model'])
+
+    # Plot the comparison graphs
+    plt.figure(figsize=(12, 10))
+
+    # Accuracy comparison
+    plt.subplot(2, 2, 1)
+    plt.bar(model_labels, metrics_df['Accuracy'], color='skyblue')
+    plt.title('Accuracy Comparison')
+    plt.ylim(0, 1)
+
+    # Precision comparison
+    plt.subplot(2, 2, 2)
+    plt.bar(model_labels, metrics_df['Precision'], color='orange')
+    plt.title('Precision Comparison')
+    plt.ylim(0, 1)
+
+    # Recall comparison
+    plt.subplot(2, 2, 3)
+    plt.bar(model_labels, metrics_df['Recall'], color='green')
+    plt.title('Recall Comparison')
+    plt.ylim(0, 1)
+
+    # F1 Score comparison
+    plt.subplot(2, 2, 4)
+    plt.bar(model_labels, metrics_df['F1 Score'], color='purple')
+    plt.title('F1 Score Comparison')
+    plt.ylim(0, 1)
+
+    # Adjust layout to prevent overlapping of titles
+    plt.tight_layout()
+
+    # Display the graphs in Streamlit (pass the current figure explicitly)
+    st.pyplot(plt.gcf())
 
 
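Note: generate_model_labels is called in the hunk above but is defined elsewhere in app.py and is not part of this commit. As a rough illustration only, a minimal sketch of a compatible helper (hypothetical; the actual implementation in app.py may differ) that abbreviates long estimator class names for the bar-chart x-axis:

    # Hypothetical sketch, not the actual helper from app.py.
    def generate_model_labels(model_names):
        labels = []
        for name in model_names:
            if len(name) <= 10:
                # Short names (e.g. 'Ensemble') are kept as-is.
                labels.append(name)
            else:
                # Long CamelCase names are reduced to their capitals,
                # e.g. 'RandomForestClassifier' -> 'RFC'.
                labels.append(''.join(ch for ch in name if ch.isupper()))
        return labels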