import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import streamlit as st
import os

# Load the MNIST dataset
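# 60,000 training and 10,000 test grayscale digit images, each 28x28 pixels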
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Preprocess the data: add a channel dimension and scale pixel values to [0, 1]
train_images = train_images.reshape((60000, 28, 28, 1)).astype("float32") / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype("float32") / 255

# Convert labels to categorical format
train_labels = keras.utils.to_categorical(train_labels, 10)
test_labels = keras.utils.to_categorical(test_labels, 10)

# Define the CNN model
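# Two conv/pool blocks plus a third conv layer feed a small dense classifier head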
def create_model():
    model = keras.Sequential([
        layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation="relu"),
        layers.Flatten(),
        layers.Dense(64, activation="relu"),
        layers.Dense(10, activation="softmax")
    ])
    
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return model

# Streamlit UI
st.title("CNN for MNIST Classification")

# Check if model is saved
model_path = "mnist_cnn_model.h5"
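# Saving the trained model to disk lets the prediction section below reload it on later reruns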

if st.button("Train Model"):
    model = create_model()
    with st.spinner("Training..."):
        # Keras prints its per-epoch progress to the terminal, not the Streamlit page
        history = model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=10, batch_size=64)

    # Save the model
    model.save(model_path)

    # Plot training loss and accuracy
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

    ax1.plot(history.history["loss"], label="Train Loss")
    ax1.plot(history.history["val_loss"], label="Val Loss")
    ax1.set_title("Training and Validation Loss")
    ax1.set_xlabel("Epoch")
    ax1.set_ylabel("Loss")
    ax1.legend()

    ax2.plot(history.history["accuracy"], label="Train Accuracy")
    ax2.plot(history.history["val_accuracy"], label="Val Accuracy")
    ax2.set_title("Training and Validation Accuracy")
    ax2.set_xlabel("Epoch")
    ax2.set_ylabel("Accuracy")
    ax2.legend()

    st.pyplot(fig)

    # Evaluate the model on test data
    test_preds = np.argmax(model.predict(test_images), axis=1)
    true_labels = np.argmax(test_labels, axis=1)

    # Store the test labels globally for later use
    st.session_state['true_labels'] = true_labels

    # Classification report
    report = classification_report(true_labels, test_preds, digits=4)
    st.text("Classification Report:")
    st.text(report)

# Testing with a specific index
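# Streamlit reruns the script on every interaction, so predictions reload the model saved to disk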
index = st.number_input("Enter an index (0-9999) to test:", min_value=0, max_value=9999, step=1)

def test_index_prediction(index):
    # Require a trained model on disk before predicting
    if not os.path.exists(model_path):
        st.error("Train the model first.")
        return

    model = keras.models.load_model(model_path)

    # Fall back to recomputing the true labels if the model was trained in an
    # earlier session and st.session_state no longer holds them
    true_labels = st.session_state.get("true_labels", np.argmax(test_labels, axis=1))

    image = test_images[index].reshape(28, 28)
    st.image(image, caption=f"True Label: {true_labels[index]}", use_column_width=True)

    # Predict the class for the selected test image
    prediction = model.predict(test_images[index].reshape(1, 28, 28, 1))
    predicted_class = int(np.argmax(prediction))
    st.write(f"Predicted Class: {predicted_class}")

if st.button("Test Index"):
    test_index_prediction(index)