Update app.py
app.py CHANGED
@@ -227,13 +227,15 @@ if 'preprocessor' not in st.session_state:
 
 # Sidebar Navigation
 st.sidebar.title("🔮 Data Wizard Pro")
+# Replace the existing app_mode section with this:
 app_mode = st.sidebar.radio("Navigate", [
     "Data Upload",
     "Smart Cleaning",
     "Advanced EDA",
     "Model Training",
     "Predictions",
-    "Visualization Lab"
+    "Visualization Lab",
+    "Neural Network Studio"  # New option
 ])
 
 # --- Progress Bar ----
@@ -366,19 +368,46 @@ elif app_mode == "Smart Cleaning":
         missing_strategy_num = "Median"
         missing_strategy_cat = "Most Frequent"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    elif clean_action == "Handle Missing Values":
+        st.markdown("**Configure how missing values will be handled.**", unsafe_allow_html=True)
+        all_impute_cols = ["All Columns"] + df.columns.tolist()
+        impute_cols = st.multiselect("Columns to Impute", all_impute_cols, default=["All Columns"], help="Select the columns with missing values to impute. Choose 'All Columns' to apply to all columns with missing values.")
+        if "All Columns" in impute_cols:
+            impute_cols = df.columns.tolist()
+
+        method = st.selectbox("Imputation Method", [
+            "KNN Imputation",
+            "Median Fill",
+            "Mean Fill",
+            "Drop Missing"
+        ], help="Choose the method to use for imputing missing values.")
+
+    elif clean_action == "Neural Network Prep":
+        st.markdown("**Neural Network Specific Preparation**", unsafe_allow_html=True)
+
+        # TODO: derive this list dynamically from the models the app supports
+        validModels = ["RNN", "CNN"]
+
+        model_Choice_text = st.radio("Which model is this data being prepared for?",
+                                     options=validModels)
+        # (an st.toggle would also work here for a two-option choice)
+
+        # Give the user some feedback on the selection
+        st.info('Select a machine learning task below!')
+
+        # Only numeric columns can be turned into model-ready sequences
+
+        validColumnNumerical_cols = df.select_dtypes(include=['int', 'float']).columns.tolist()
+        numcol_cols = st.multiselect("Sequence Columns (numeric features to feed the model)", options=validColumnNumerical_cols)
+
+        # Show a generated code example for the current selection so the user
+        # can see how each configuration plays out
+        st.code('Code example is generated.')
+        # TODO: hook each configuration up to a callback so a single press
+        # regenerates the example and shows the output for the chosen columns
+
+        seq_length = st.number_input("Sequence Length (for RNN)", 10, 100, 30, help="Number of time steps per input sequence.")
+
     if method == "KNN Imputation":
         knn_neighbors = st.slider("KNN Neighbors", 2, 10, 5, help="Number of neighbors for KNN Imputation.") #Parameter
 
@@ -959,4 +988,98 @@ elif app_mode == "Visualization Lab":
 
 
     except Exception as e:
-        st.error(f"Error generating visualization: {e}")
+        st.error(f"Error generating visualization: {e}")
+
+elif app_mode == "Neural Network Studio":
+    st.title("🧠 Deep Learning Studio")
+
+    # Network Type Selection
+    nn_type = st.selectbox("Network Type", [
+        "Multilayer Perceptron (MLP)",
+        "Convolutional Neural Network (CNN)",
+        "Recurrent Neural Network (RNN/LSTM)",
+        "Transformer",
+        "Autoencoder"
+    ])
+
+    # Dynamic Configuration
+    with st.expander("Architecture Configuration"):
+        if nn_type == "Multilayer Perceptron (MLP)":
+            num_layers = st.slider("Hidden Layers", 1, 10, 3)
+            activation = st.selectbox("Activation Function", ["relu", "sigmoid", "tanh"])
+
+        elif nn_type == "Convolutional Neural Network (CNN)":
+            conv_layers = st.slider("Convolutional Layers", 1, 5, 2)
+            filters = st.number_input("Filters per Layer", 32, 512, 64)
+
+        elif nn_type == "Recurrent Neural Network (RNN/LSTM)":
+            rnn_units = st.number_input("LSTM Units", 64, 1024, 128)
+            return_sequences = st.checkbox("Return Sequences")
+
+    # Training Configuration
+    with st.expander("Training Configuration"):
+        batch_size = st.selectbox("Batch Size", [32, 64, 128, 256], index=1)
+        epochs = st.slider("Epochs", 1, 100, 20)
+        learning_rate = st.number_input("Learning Rate", 1e-5, 1e-1, 1e-3, format="%.5f")
+        optimizer = st.selectbox("Optimizer", ["Adam", "SGD", "RMSprop"])
+
+    # Place this after the Training Configuration expander
+    if st.button("Start Training"):
+        loss_placeholder = st.empty()
+        loss_history = []
+
+        # Simulated training loop
+        for epoch in range(epochs):
+            # Simulate training progress
+            loss = np.random.rand()  # Replace with actual training logic
+            loss_history.append(loss)
+
+            # Update plot
+            fig = plt.figure()
+            plt.plot(loss_history, label="Training Loss")
+            plt.xlabel("Epoch")
+            plt.ylabel("Loss")
+            plt.legend()
+            loss_placeholder.pyplot(fig)
+            plt.close(fig)
+
+            st.write(f"Epoch {epoch+1}/{epochs} - Loss: {loss:.4f}")
+
+        st.success("Training complete!")
+
+    # Educational Content
+    with st.expander("📚 Network Fundamentals"):
+        st.markdown("""
+**MLP Fundamentals**
+- Basic feedforward architecture
+- Ideal for tabular data
+- Activation functions guide
+""")
+
+        if nn_type == "Convolutional Neural Network (CNN)":
+            st.markdown("""
+**CNN Concepts**
+- Convolutional filters
+- Pooling operations
+- Feature hierarchy
+""")
+
+    if st.checkbox("Enable Model Interpretation"):
+        interpretation_type = st.selectbox("Interpretation Method", [
+            "SHAP Values",
+            "LIME",
+            "Attention Visualization"
+        ])
+
+        if interpretation_type == "SHAP Values":
+            st.markdown("""
+**SHAP (SHapley Additive exPlanations)**
+- Explains individual predictions
+- Shows feature importance
+""")
+
+            if st.button("Generate SHAP Values"):
+                with st.spinner("Calculating SHAP values..."):
+                    # Add SHAP calculation logic here
+                    st.success("SHAP values calculated!")
+                    st.image("shap_plot.png")  # Replace with actual SHAP plot
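The "Neural Network Prep" branch collects `numcol_cols` and `seq_length` but never builds the sequences themselves. A minimal sketch of that windowing step, assuming the selected columns are numeric; the helper name `make_sequences` is illustrative, not part of the app:

```python
import numpy as np
import pandas as pd

def make_sequences(df: pd.DataFrame, cols: list, seq_length: int) -> np.ndarray:
    """Slide a fixed-length window over the selected numeric columns.

    Returns an array shaped (n_windows, seq_length, n_features), the layout
    Keras-style RNN/LSTM layers expect.
    """
    values = df[cols].to_numpy(dtype="float32")
    if len(values) < seq_length:
        return np.empty((0, seq_length, len(cols)), dtype="float32")
    windows = [values[i:i + seq_length] for i in range(len(values) - seq_length + 1)]
    return np.stack(windows)

# e.g. X_seq = make_sequences(df, numcol_cols, seq_length)
```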
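The Neural Network Studio training loop currently fakes progress with `np.random.rand()`. A sketch of what a real loop could look like for the MLP case, using Keras; the synthetic `X`/`y` stand in for data this diff does not prepare, and `epochs`, `batch_size` and `learning_rate` would come from the widgets above:

```python
import numpy as np
import streamlit as st
from tensorflow import keras

# Stand-in data and hyperparameters; in the app these would come from the
# uploaded dataset and the sidebar widgets.
X = np.random.rand(500, 8).astype("float32")
y = X.sum(axis=1)
epochs, batch_size, learning_rate = 20, 64, 1e-3

model = keras.Sequential([
    keras.Input(shape=(X.shape[1],)),
    keras.layers.Dense(64, activation="relu"),
    keras.layers.Dense(1),
])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse")

loss_placeholder = st.empty()
loss_history = []
for epoch in range(epochs):
    # One pass over the data per UI epoch so the chart updates as training runs
    history = model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0)
    loss_history.append(history.history["loss"][0])
    loss_placeholder.line_chart(loss_history)  # redraws in place each epoch
st.success("Training complete!")
```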
|