# bandwidth-classifier/src/streamlit_app.py
import os

# Hide GPUs from TensorFlow: CUDA_VISIBLE_DEVICES must be set before
# TensorFlow initializes CUDA, so set it ahead of the import.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # '-1' = CPU only, '0' = first GPU

import re

import altair as alt
import streamlit as st
from streamlit_option_menu import option_menu
import streamlit.components.v1 as html
from PIL import Image
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras import callbacks
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Embedding, Dense, Dropout, GlobalMaxPooling1D
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from imblearn.over_sampling import SMOTE
# Set seeds for reproducibility
np.random.seed(42)
tf.random.set_seed(42)

class RealTimeLogger(callbacks.Callback):
    """Keras callback that streams per-epoch metrics into a Streamlit container."""
def __init__(self, container, epochs):
super().__init__()
self.container = container
self.epochs = epochs
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
        # Format the log line with aligned fields
log_text = (
f"Epoch {epoch+1:03d}/{self.epochs:03d} | "
f"Loss: {logs.get('loss', 0):.4f} | "
f"Acc: {logs.get('accuracy', 0):.4f} | "
f"Val Loss: {logs.get('val_loss', 0):.4f} | "
f"Val Acc: {logs.get('val_accuracy', 0):.4f}\n"
)
# Update session state
if 'training_logs' not in st.session_state:
st.session_state.training_logs = []
        st.session_state.training_logs.insert(0, log_text)

        # Refresh the live display
        with self.container:
            st.subheader("Training Logs")
            st.code(
                # Newest entries are first, so keep the first 100 lines
                "".join(st.session_state.training_logs[:100]),
                language="log",
                line_numbers=True
            )

def clean_text(text):
    """Lowercase text and strip all characters except letters, digits, and whitespace."""
    text = text.lower()
    text = re.sub(r'[^a-z0-9\s]', '', text)
    return text

def load_data(file):
    """Parse an uploaded .txt file into parallel lists of (line, label, bandwidth)."""
    lines = file.getvalue().decode("utf-8").splitlines()
    data, labels, bandwidth = [], [], []
for line in lines:
line = clean_text(line.strip())
if "fake bandwidth" in line:
labels.append("Fake")
match = re.search(r'(\d+)', line)
bandwidth.append(int(match.group()) if match else 0)
elif "genuine bandwidth" in line:
labels.append("Genuine")
match = re.search(r'(\d+)', line)
bandwidth.append(int(match.group()) if match else 0)
elif "no heavy activity" in line:
labels.append("No Heavy")
bandwidth.append(0)
else:
continue
data.append(line)
return data, labels, bandwidth
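
# Illustrative input format (an assumption inferred from the phrases load_data
# matches after clean_text lowercases each line):
#   "fake bandwidth 120"     -> label "Fake",     bandwidth 120
#   "genuine bandwidth 80"   -> label "Genuine",  bandwidth 80
#   "no heavy activity"      -> label "No Heavy", bandwidth 0
# Lines matching none of the three phrases are skipped.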

def preprocess_text(texts):
    """Fit a Tokenizer on the texts and return it with 100-token padded sequences."""
    tokenizer = Tokenizer(num_words=10000, oov_token="<OOV>")
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
padded_sequences = pad_sequences(sequences, maxlen=100, padding='post')
return tokenizer, padded_sequences
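
# Illustrative example (hypothetical input): preprocess_text(["fake bandwidth 120"])
# returns the fitted Tokenizer and an int array of shape (1, 100), zero-padded
# after the token ids.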

def build_cnn_model(input_length, num_classes=3, num_words=10000, embedding_dim=240):
    model = Sequential([
        # Fix: the deprecated input_length argument was dropped from Embedding,
        # which leaves the input_length parameter above unused.
        Embedding(num_words, embedding_dim),
Conv1D(256, 3, activation='relu', kernel_regularizer=l2(0.01)),
MaxPooling1D(3),
Conv1D(128, 3, activation='relu', kernel_regularizer=l2(0.01)),
MaxPooling1D(3),
Conv1D(64, 3, activation='relu', kernel_regularizer=l2(0.01)),
MaxPooling1D(3),
GlobalMaxPooling1D(),
Dense(128, activation='relu', kernel_regularizer=l2(0.01)),
Dropout(0.5),
Dense(num_classes, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
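
# Illustrative usage (assuming 100-token padded inputs from preprocess_text):
# build_cnn_model(100) returns a compiled model whose softmax head emits
# (batch, 3) class probabilities for Fake / Genuine / No Heavy.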
#----------------------------------------------------------------------------------------------------
# Sidebar
#----------------------------------------------------------------------------------------------------
with st.sidebar:
    choose = option_menu("Bandwidth", ["ABOUT", "CNN", "GRU", "LSTM", "COMBINE"],
icons=['person-circle', 'bar-chart-steps', 'activity', 'calendar-week','transparency'],
menu_icon="router", default_index=0,
styles={
"container": {"padding": "5!important", "background-color": "#fafafa"},
"icon": {"color": "black", "font-size": "25px"},
"nav-link": {"font-size": "16px", "text-align": "left", "margin":"0px", "--hover-color": "#eee"},
"nav-link-selected": {"background-color": "#02ab21"},
}
)
#----------------------------------------------------------------------------------------------------
# ABOUT
#----------------------------------------------------------------------------------------------------
# image_about = Image.open(r'/teamspace/studios/this_studio/icons/datamining.jpg')
if choose == "ABOUT":
col1, col2 = st.columns( [0.8, 0.2])
with col1:
st.markdown(
""" <style> .font {
font-size:35px ; color: #000000; font-weight: bold;}
.custom-text {
font-size: 18px;
text-align: justify;
color: #000000;
}
</style> """, unsafe_allow_html=True)
st.markdown('<p class="font">ABOUT</p>', unsafe_allow_html=True)
# with col2:
# st.markdown("## 🧠")
    st.write('<p class="custom-text">I am a researcher and developer in computer science focused on data processing and network bandwidth classification using a combine-classification approach. In my work, I combine multiple machine learning algorithms, including Decision Tree, Random Forest, SVM, and K-Nearest Neighbors, to improve bandwidth classification accuracy. Please visit my website at: https://kodetr.com</p>', unsafe_allow_html=True)
# st.image(image_about, width=700)
#----------------------------------------------------------------------------------------------------
# CNN
#----------------------------------------------------------------------------------------------------
elif choose == "CNN":
    st.title("Bandwidth Classification with CNN")
    st.write("This app uses a CNN model to classify bandwidth data as Fake, Genuine, or No Heavy Activity")

    # Upload files
    training_file = st.file_uploader("Upload Training Data (.txt)", type=["txt"], accept_multiple_files=True)
    real_files = st.file_uploader("Upload Real Data (.txt)", type=["txt"], accept_multiple_files=True)

    # Model parameters
    epochs = st.number_input("Number of Epochs", min_value=1, value=2000)
    batch_size = st.number_input("Batch Size", min_value=1, value=32)
    if st.button("Process Data"):
if training_file and real_files:
            # Process the training data (each upload is parsed separately)
            try:
                data_train, labels_train, bandwidth_train = [], [], []
                for file in training_file:  # training_file is a list of uploads
                    d, lbl, bw = load_data(file)
                    data_train.extend(d)
                    labels_train.extend(lbl)
                    bandwidth_train.extend(bw)
                if len(data_train) == 0:
                    st.error("Training data is invalid or empty!")
                    st.stop()
# Preprocessing
tokenizer, X_train = preprocess_text(data_train)
le = LabelEncoder()
labels_encoded = le.fit_transform(labels_train)
# Split data
X_train, X_test, y_train, y_test, bw_train, bw_test = train_test_split(
X_train, labels_encoded, bandwidth_train,
test_size=0.2, stratify=labels_encoded, random_state=42
)
                # SMOTE: oversample minority classes to balance the training set
smote = SMOTE(random_state=42)
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
                # Convert labels to one-hot (categorical) vectors
y_train_cat = tf.keras.utils.to_categorical(y_train_smote, num_classes=3)
y_test_cat = tf.keras.utils.to_categorical(y_test, num_classes=3)
if 'training_progress' not in st.session_state:
st.session_state.training_progress = []
                # Container for live training updates
live_container = st.empty()
                # Build and train the model
                model = build_cnn_model(X_train.shape[1])
                early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
                reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=0)
                history = model.fit(
                    X_train_smote, y_train_cat,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_data=(X_test, y_test_cat),
                    callbacks=[early_stop, reduce_lr, RealTimeLogger(live_container, epochs)],
                    verbose=0
                )
                # Plot the training history
                st.subheader("📈 Training History")

                # Two-column layout
col1, col2 = st.columns(2)
with col1:
                    # Loss curve
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(history.history['loss'], label='Training Loss', color='#FF4B4B', linewidth=2)
ax.plot(history.history['val_loss'], label='Validation Loss', color='#0068C9', linewidth=2)
                    ax.set_title('Loss over Epochs', fontsize=14)
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel('Loss', fontsize=12)
ax.grid(True, alpha=0.3)
ax.legend()
st.pyplot(fig)
with col2:
                    # Accuracy curve
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(history.history['accuracy'], label='Training Accuracy', color='#00D154', linewidth=2)
ax.plot(history.history['val_accuracy'], label='Validation Accuracy', color='#FF922B', linewidth=2)
                    ax.set_title('Accuracy over Epochs', fontsize=14)
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel('Accuracy', fontsize=12)
ax.grid(True, alpha=0.3)
ax.legend()
st.pyplot(fig)
                # Evaluate on the held-out test split
                loss, acc = model.evaluate(X_test, y_test_cat, verbose=0)
                st.success(f"Model Accuracy: {acc*100:.2f}%")

                # Process the real data
data_real, labels_real, bandwidth_real = [], [], []
for file in real_files:
d, lbl, bw = load_data(file)
data_real.extend(d)
labels_real.extend(lbl)
bandwidth_real.extend(bw)
                # Compute statistics; guard against empty input before dividing
                fake = labels_real.count('Fake')
                genuine = labels_real.count('Genuine')
                no_heavy = labels_real.count('No Heavy')
                total = len(labels_real)
                if total == 0:
                    st.error("Real data is invalid or empty!")
                    st.stop()
                # Visualization
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
# Pie chart
ax1.pie([fake, genuine, no_heavy],
labels=['Fake', 'Genuine', 'No Heavy'],
autopct='%1.1f%%')
                ax1.set_title('Bandwidth Category Distribution')
                # Bar plot: average bandwidth per category
avg_bw = [
np.mean([bw for lbl, bw in zip(labels_real, bandwidth_real) if lbl == 'Fake'] or [0]),
np.mean([bw for lbl, bw in zip(labels_real, bandwidth_real) if lbl == 'Genuine'] or [0]),
0
]
ax2.bar(['Fake', 'Genuine', 'No Heavy'], avg_bw)
                ax2.set_title('Average Bandwidth per Category')
ax2.set_ylabel('Mbps')
st.pyplot(fig)
                # Show statistics
                st.subheader("Real Data Statistics:")
st.write(f"Total Data: {total}")
st.write(f"Fake Bandwidth: {fake} ({fake/total*100:.2f}%)")
st.write(f"Genuine Bandwidth: {genuine} ({genuine/total*100:.2f}%)")
st.write(f"No Heavy Activity: {no_heavy} ({no_heavy/total*100:.2f}%)")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")
        else:
            st.warning("Please upload both training and real data files first!")
#----------------------------------------------------------------------------------------------------
# GRU
#----------------------------------------------------------------------------------------------------
elif choose == "GRU":
    st.title("Bandwidth Classification with GRU")
# st.markdown('Design GRU')
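
    # A minimal GRU variant for this tab is sketched below as a commented-out
    # assumption (not part of the original app); it mirrors build_cnn_model but
    # swaps the Conv1D stack for a recurrent layer:
    #
    # from tensorflow.keras.layers import GRU
    # gru_model = Sequential([
    #     Embedding(10000, 240),
    #     GRU(128),
    #     Dropout(0.5),
    #     Dense(3, activation='softmax'),
    # ])
    # gru_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])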
#----------------------------------------------------------------------------------------------------
# LSTM
#----------------------------------------------------------------------------------------------------
elif choose == "LSTM":
    st.title("Bandwidth Classification with LSTM")
# st.subheader('Test 123')
# st.markdown('Design LSTM')
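
    # As with the GRU tab, a commented-out LSTM sketch is given as an
    # assumption (not part of the original app):
    #
    # from tensorflow.keras.layers import LSTM
    # lstm_model = Sequential([
    #     Embedding(10000, 240),
    #     LSTM(128),
    #     Dropout(0.5),
    #     Dense(3, activation='softmax'),
    # ])
    # lstm_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])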
#----------------------------------------------------------------------------------------------------
# Combine
#----------------------------------------------------------------------------------------------------
elif choose == "COMBINE":
    st.title("Bandwidth Classification with COMBINE")
    # st.subheader('Test 123')
    # st.markdown('Design COMBINE')
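
    # Placeholder sketch (an assumption, not the author's method): "combine"
    # could soft-vote by averaging the softmax outputs of the per-architecture
    # models trained above, e.g.:
    #
    # probs = np.mean([m.predict(X) for m in (cnn_model, gru_model, lstm_model)], axis=0)
    # preds = probs.argmax(axis=1)  # 0=Fake, 1=Genuine, 2=No Heavy (LabelEncoder order)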