Update pages/1_Random_Data.py

pages/1_Random_Data.py  CHANGED  +27 -21
@@ -6,8 +6,6 @@ from sklearn.datasets import make_moons, make_circles, make_classification
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler
 
-st.set_option('deprecation.showPyplotGlobalUse', False)
-
 st.title("🔢 Random Data Playground")
 st.markdown("Define hyperparameters, generate synthetic data, and train your neural network interactively.")
 
@@ -53,6 +51,9 @@ if submitted:
     st.pyplot(fig)
 
     if st.button("Train Model"):
+        progress_bar = st.progress(0)
+        status_text = st.empty()
+
         # Build model
         model = tf.keras.Sequential()
         model.add(tf.keras.layers.Input(shape=(2,)))
@@ -63,25 +64,24 @@ if submitted:
         optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
         model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
 
-        #
-
-
-
-
+        # Custom training loop to show progress
+        class StreamlitCallback(tf.keras.callbacks.Callback):
+            def on_epoch_end(self, epoch, logs=None):
+                percent = (epoch + 1) / epochs
+                progress_bar.progress(min(percent, 1.0))
+                status_text.text(f"Epoch {epoch+1}/{epochs} - Loss: {logs['loss']:.4f}, Val Loss: {logs['val_loss']:.4f}")
 
-
-
-
-
-
-
+        history = model.fit(
+            X_train, y_train,
+            batch_size=batch_size,
+            epochs=epochs,
+            validation_split=0.2,
+            verbose=0,
+            callbacks=[StreamlitCallback()]
+        )
 
         st.success("✅ Model Trained!")
 
-        # Final loss values
-        st.write(f"Final Train Loss: **{train_loss[-1]:.4f}**")
-        st.write(f"🧪 Final Validation Loss: **{val_loss[-1]:.4f}**")
-
         # Decision Region After Training
         st.subheader("Decision Region After Training")
         preds = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
@@ -90,12 +90,18 @@ if submitted:
         ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=y, cmap=plt.cm.bwr, edgecolor='k')
         st.pyplot(fig)
 
-        # Loss
-        st.subheader("
+        # Loss Plot
+        st.subheader("Training vs Validation Loss")
         fig, ax = plt.subplots()
-        ax.plot(train_loss, label='Train Loss')
-        ax.plot(val_loss, label='Validation Loss')
+        ax.plot(history.history['loss'], label='Train Loss')
+        ax.plot(history.history['val_loss'], label='Validation Loss')
         ax.set_xlabel("Epochs")
         ax.set_ylabel("Loss")
         ax.legend()
         st.pyplot(fig)
+
+        # Loss values
+        final_train_loss = history.history['loss'][-1]
+        final_val_loss = history.history['val_loss'][-1]
+        st.write(f"🧮 **Final Train Loss**: `{final_train_loss:.4f}`")
+        st.write(f"🧪 **Final Validation Loss**: `{final_val_loss:.4f}`")