destiratnakomala committed on
Commit
6c88ced
1 Parent(s): 0f22bf1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +301 -0
app.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ import matplotlib.pyplot as plt
4
+ import seaborn as sns
5
+ from sklearn.model_selection import train_test_split, GridSearchCV
6
+ from sklearn.linear_model import LinearRegression, Lasso
7
+ from sklearn.ensemble import RandomForestRegressor
8
+ from sklearn.metrics import mean_squared_error, r2_score
9
+ import joblib
10
+ import streamlit as st
11
+ import plotly.express as px
12
+ import plotly.figure_factory as ff
13
+
14
# Main function
def main():
    """Streamlit entry point: upload a CSV, preview/clean it, run EDA,
    then train and hyperparameter-tune a regression model on
    user-selected features.

    Side effects: renders the Streamlit UI and, when the user trains a
    model, writes '<model_name>.pkl' to the working directory.
    """
    st.set_page_config(page_title="Data Automation-Machine Learning")
    st.title("Machine Learning")

    # Step 1: Upload Data
    with st.expander("1: Add Your Data Source"):
        uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])

    # Every later step needs the DataFrame; stop early instead of hitting a
    # NameError on 'data' when nothing has been uploaded yet.
    if uploaded_file is None:
        st.info("Upload a CSV file in step 1 to continue.")
        return

    data = pd.read_csv(uploaded_file)

    with st.expander("2: DataSet Preview"):
        _show_overview(data)

    with st.expander("3: Data Cleaning"):
        data = _clean_data(data)

    with st.expander('4: EDA'):
        _run_eda(data)

    with st.expander("5: Feature Engineering"):
        target_column = st.selectbox("Select the target variable", options=data.columns)
        feature_columns = st.multiselect("Select features", options=data.columns.drop(target_column))

    with st.expander("6: Modelling "):
        _train_model(data, target_column, feature_columns)

    with st.expander("7: HyperParameter"):
        _tune_model(data, target_column, feature_columns)


def _show_overview(data):
    """Step 2: four side-by-side summaries of the uploaded frame."""
    view1, view2, view3, view4 = st.columns(4)
    with view1:
        st.write("Data Overview")
        st.dataframe(data.head())
    with view2:
        st.write(" Data Description")
        st.write(data.describe())
    with view3:
        st.write(" Missing Values")
        st.write(data.isnull().sum())
    with view4:
        st.write(" Data Types")
        st.write(data.dtypes)


def _clean_data(data):
    """Step 3: optional duplicate removal and missing-value handling.

    Returns the (possibly modified) DataFrame so the caller keeps using
    the cleaned version. NOTE(review): Streamlit reruns the script on
    every interaction and the CSV is re-read each time, so cleaning only
    persists within a single run — confirm whether caching is wanted.
    """
    clean1, clean2, clean3 = st.columns(3)
    with clean1:
        st.write(" Data Summary Before Cleaning")
        st.write(data.describe())
    with clean2:
        st.write("Missing Values Before Cleaning:")
        st.write(data.isnull().sum())
    with clean3:
        # Visualize missing values
        if st.checkbox("Show Missing Values Heatmap"):
            fig, ax = plt.subplots(figsize=(10, 6))
            sns.heatmap(data.isnull(), cbar=False, cmap='viridis', ax=ax)
            plt.title("Missing Values Heatmap")
            st.pyplot(fig)

    clean4, clean5 = st.columns(2)
    with clean4:
        # Remove duplicates
        if st.checkbox("Remove Duplicate Rows"):
            initial_shape = data.shape
            data = data.drop_duplicates()
            st.success(f"Removed {initial_shape[0] - data.shape[0]} duplicate rows.")

    with clean5:
        # Handle missing values
        missing_strategy = st.selectbox(
            "Choose a strategy for handling missing values",
            options=["Drop Missing Values", "Fill with Mean", "Fill with Median", "Fill with Mode", "Do Nothing"]
        )
        if st.button("Apply Missing Value Strategy"):
            # Mean/median only exist for numeric columns; a frame-wide
            # data.mean() raises on mixed-type frames in recent pandas.
            numeric_cols = data.select_dtypes(include='number').columns
            if missing_strategy == "Drop Missing Values":
                data = data.dropna()
                st.success("Dropped rows with missing values.")
            elif missing_strategy == "Fill with Mean":
                data[numeric_cols] = data[numeric_cols].fillna(data[numeric_cols].mean())
                st.success("Filled missing values with the mean.")
            elif missing_strategy == "Fill with Median":
                data[numeric_cols] = data[numeric_cols].fillna(data[numeric_cols].median())
                st.success("Filled missing values with the median.")
            elif missing_strategy == "Fill with Mode":
                for column in data.select_dtypes(include=['object']).columns:
                    data[column] = data[column].fillna(data[column].mode()[0])
                st.success("Filled missing values with the mode for categorical columns.")
            elif missing_strategy == "Do Nothing":
                st.info("No changes made to missing values.")

    clean7, clean8 = st.columns(2)
    with clean7:
        # Display basic info after cleaning
        st.write(" Data Summary After Cleaning")
        st.write(data.describe())
    with clean8:
        st.write("Missing Values After Cleaning:")
        st.write(data.isnull().sum())
    return data


def _run_eda(data):
    """Step 4: correlation heatmap plus optional distribution/box plots."""
    st.write("Correlation Matrix")

    # corr() raises on non-numeric columns in pandas >= 2, so compute it
    # on the numeric subset only.
    numeric_data = data.select_dtypes(include='number')
    correlation_matrix = numeric_data.corr()

    # Annotated Plotly heatmap of the correlation matrix.
    fig = ff.create_annotated_heatmap(
        z=correlation_matrix.values,
        x=list(correlation_matrix.columns),
        y=list(correlation_matrix.index),
    )
    fig.update_layout(
        title="Correlation Matrix",
        xaxis_title="Features",
        yaxis_title="Features",
        width=700,
        height=500,
    )
    st.plotly_chart(fig)

    eda1, eda2 = st.columns(2)
    with eda1:
        # Plotting distributions for numerical features
        if st.checkbox("Show Distribution Plots for Numeric Features"):
            for column in numeric_data.columns:
                fig, ax = plt.subplots(figsize=(8, 4))
                sns.histplot(data[column], bins=30, kde=True, ax=ax)
                plt.title(f'Distribution of {column}')
                st.pyplot(fig)
    with eda2:
        # Boxplots for outlier detection
        if st.checkbox("Show Boxplots for Numeric Features"):
            for column in numeric_data.columns:
                fig, ax = plt.subplots(figsize=(8, 4))
                sns.boxplot(x=data[column], ax=ax)
                plt.title(f'Boxplot of {column}')
                st.pyplot(fig)


def _build_model(option):
    """Return an unfitted estimator for the given UI option string."""
    if option == "Linear Regression":
        return LinearRegression()
    if option == "Random Forest Regression":
        return RandomForestRegressor(random_state=42)
    return Lasso()


def _show_results(title):
    """Render the prediction plot and metrics stored in session_state, if any."""
    if st.session_state.model_plot is not None:
        y_test, y_pred = st.session_state.model_plot
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(y_test, label="True Values", color="blue", linestyle="--")
        ax.plot(y_pred, label="Predicted Values", color="orange")
        ax.set_title(title)
        ax.set_xlabel('Index')
        ax.set_ylabel('Values')
        ax.legend()
        st.pyplot(fig)

    if st.session_state.model_metrics is not None:
        mse, r2 = st.session_state.model_metrics
        st.success(f"Mean Squared Error: {mse:.2f}")
        st.success(f"R^2 Score: {r2:.2f}")


def _train_model(data, target_column, feature_columns):
    """Step 6: train the selected regressor, save/offer it for download,
    and persist plot + metrics in session_state."""
    # Initialize session state so results survive Streamlit reruns.
    if 'model_plot' not in st.session_state:
        st.session_state.model_plot = None
    if 'model_metrics' not in st.session_state:
        st.session_state.model_metrics = None

    model_option = st.selectbox("Select Regression Model", options=["Linear Regression", "Random Forest Regression", "Lasso Regression"])

    # Ask for the name *before* the button: a text_input created inside a
    # button branch disappears on the rerun triggered by typing, so the
    # original code always saved under the default name.
    model_name = st.text_input('Enter model name', 'my_model')

    if st.button("Train Model (Without Hyperparameter Tuning)"):
        if feature_columns:
            X = data[feature_columns]
            y = data[target_column]
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

            model = _build_model(model_option)
            model.fit(X_train, y_train)

            # Save the model and offer it for download.
            model_file_path = f'{model_name}.pkl'
            joblib.dump(model, model_file_path)
            st.success("Model saved successfully!")
            with open(model_file_path, "rb") as f:
                st.download_button(
                    label="Download Model",
                    data=f,
                    file_name=model_file_path,
                    mime="application/octet-stream"
                )

            # Evaluate on the held-out split.
            y_pred = model.predict(X_test)
            mse = mean_squared_error(y_test, y_pred)
            r2 = r2_score(y_test, y_pred)

            st.session_state.model_plot = (y_test.reset_index(drop=True), y_pred)
            st.session_state.model_metrics = (mse, r2)
        else:
            st.warning("Select at least one feature in step 5 before training.")

    # Rendered from session_state so the plot persists across reruns.
    _show_results(f'{model_option}: True Values vs Predictions')


def _tune_model(data, target_column, feature_columns):
    """Step 7: GridSearchCV hyperparameter tuning for the chosen model."""
    st.write("Hyperparameter Tuning")
    if not feature_columns:
        st.warning("Select features in step 5 to enable tuning.")
        return

    hyperparam_model_option = st.selectbox("Select Model for Hyperparameter Tuning", options=["Linear Regression", "Random Forest Regression", "Lasso Regression"])

    # Search space per model.
    if hyperparam_model_option == "Linear Regression":
        param_grid = {'fit_intercept': [True, False]}
    elif hyperparam_model_option == "Random Forest Regression":
        param_grid = {'n_estimators': [50, 100, 200], 'max_depth': [10, 20, None], 'min_samples_split': [2, 5, 10]}
    else:
        param_grid = {'alpha': [0.01, 0.1, 1, 10], 'max_iter': [1000, 5000, 10000]}

    if st.button("Train Model with Hyperparameter Tuning"):
        X = data[feature_columns]
        y = data[target_column]
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # 5-fold cross-validated grid search over param_grid.
        grid_search = GridSearchCV(_build_model(hyperparam_model_option), param_grid, cv=5)
        grid_search.fit(X_train, y_train)

        best_model = grid_search.best_estimator_
        y_pred = best_model.predict(X_test)
        mse = mean_squared_error(y_test, y_pred)
        r2 = r2_score(y_test, y_pred)

        st.session_state.model_plot = (y_test.reset_index(drop=True), y_pred)
        st.session_state.model_metrics = (mse, r2)
        st.success(f"Best Parameters: {grid_search.best_params_}")

    _show_results(f'{hyperparam_model_option}: True Values vs Predictions (Tuned)')
# Run the app only when this file is executed directly (not on import).
if __name__ == "__main__":
    main()