import streamlit as st
import pandas as pd
import numpy as np
import re
import json
import joblib
from base64 import b64encode
from io import BytesIO
from sklearn.feature_extraction.text import TfidfVectorizer
# Additional libraries
#import matplotlib.pyplot as plt
#import seaborn as sns
#import plotly.express as px
from wordcloud import WordCloud
import nltk
from nltk.corpus import stopwords
#from transformers import pipeline
# Function to clean text with regular expressions
#@st.cache_data
def clean_text(text):
    # Step 1: Remove non-ASCII characters
    text = re.sub(r'[^\x00-\x7F]+', '', text)
    # Step 2: Remove URLs
    text = re.sub(r'http[s]?://.[a-zA-Z0-9./_?=%&#+!]+', '', text)
    text = re.sub(r'pic.twitter.com?.[a-zA-Z0-9./_?=%&#+!]+', '', text)
    # Step 3: Remove mentions
    text = re.sub(r'@[\w]+', '', text)
    # Step 4: Remove hashtags
    text = re.sub(r'#([\w]+)', '', text)
    # Step 5: Remove leftover '&amp;' and '&gt;' HTML entities
    text = re.sub(r'&amp;|&gt;', '', text)
    # Step 6: Remove special characters (symbols)
    text = re.sub(r'[!$%^&*@#()_+|~=`{}\[\]%\-:";\'<>?,./]', '', text)
    # Step 7: Remove digits
    text = re.sub(r'[0-9]+', '', text)
    # Step 8: Collapse multiple spaces into a single space
    text = re.sub(' +', ' ', text)
    # Step 9: Strip leading and trailing whitespace
    text = text.strip()
    # Step 10: Convert the text to lowercase
    text = text.lower()
    # Step 11: Collapse runs of three or more identical characters (e.g. 'yukkk')
    # text = re.sub(r'([a-zA-Z])\1\1', '\\1', text)
    #text = re.sub(r'(.)(\1{2,})', r'\1\1', text)
    text = re.sub(r'(\w)\1{2,}', r'\1', text)
    return text
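
# Illustrative example of the full cleaning pipeline (hypothetical input, not from the dataset):
#   clean_text("Halo @user cek https://t.co/abc yukkk!!! 123")  ->  "halo cek yuk"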
@st.cache_resource
def load_file(kamus_path, kamus_sendiri_path, tfidf_model_path):
    # Read Salsabila's colloquial (slang) lexicon
    with open(kamus_path) as f:
        data = f.read()
    lookp_dict = json.loads(data)

    # My own slang dict for entries missing from Salsabila's lexicon
    with open(kamus_sendiri_path) as f:
        kamus_sendiri = f.read()
    kamus_gaul_baru = json.loads(kamus_sendiri)

    # Merge the new slang entries into the existing lexicon
    lookp_dict.update(kamus_gaul_baru)

    nltk.download("stopwords")
    stop_words = set(stopwords.words("indonesian"))

    additional_stopwords = []  # Replace with any words you want to add
    stop_words.update(additional_stopwords)

    # Remove some words from the stopword set so they are kept in the tweets
    words_to_remove = ['lama', 'datang', 'sekarang', 'percuma', 'jauh', 'waktu', 'kurang', 'bagaimana', 'gimana', 'tanya', 'berapa', 'jadwal', 'info', 'naik']
    for word in words_to_remove:
        if word in stop_words:
            stop_words.remove(word)

    # Load the fitted TF-IDF vectorizer and the trained models
    tfidf_vectorizer = joblib.load(tfidf_model_path)
    model_ensemble = joblib.load('ensemble_clf_soft_smote.joblib')
    #model_rf
    model_nb = joblib.load('naive_bayes_model_smote.joblib')
    model_lr = joblib.load('logreg_model_smote.joblib')

    return lookp_dict, stop_words, tfidf_vectorizer, model_ensemble, model_nb, model_lr
# Function to normalize slang words
#@st.cache_data
def normalize_slang(text, slang_dict):
    words = text.split()
    normalized_words = [slang_dict.get(word, word) for word in words]
    return ' '.join(normalized_words)
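
# Illustrative example (hypothetical slang entry; the real entries come from the lexicon files):
#   normalize_slang("gmn kabarnya", {"gmn": "gimana"})  ->  "gimana kabarnya"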
#---------------------------------------------------NLTK Remove Stopwords----------------------------------------------------------------------
#@st.cache_data
def remove_stopwords(text, stop_words):
    # Split the text into words
    words = text.split()
    # Remove Indonesian stopwords
    words = [word for word in words if word not in stop_words]
    return " ".join(words)
#---------------------------------------------------TFIDF----------------------------------------------------------------------
# The fitted TF-IDF model is loaded with joblib in load_file() above (make sure the path is correct)
# Function for TF-IDF feature extraction
#@st.cache_data
#def extract_tfidf_features(texts, _tfidf_vectorizer):
#    tfidf_matrix = tfidf_vectorizer.transform(texts)
#    return tfidf_matrix
#---------------------------------------------------Model Selection----------------------------------------------------------------------
# Function to pick a model based on the user's choice
def select_sentiment_model(selected_model, model_ensemble, model_nb, model_lr):
    if selected_model == "Ensemble":
        model = model_ensemble
    elif selected_model == "Random Forest":
        # No standalone Random Forest model is loaded, so fall back to the ensemble
        model = model_ensemble
    elif selected_model == "Naive Bayes":
        model = model_nb
    elif selected_model == "Logistic Regression":
        model = model_lr
    else:
        # Fall back to the default model for any unrecognized choice
        model = model_ensemble
    return model
# Function to predict sentiment
def predict_sentiment(text, _sentiment_model, _tfidf_vectorizer, slang_dict):
    # Step 1: Clean and normalize the text
    cleaned_text = clean_text(text)
    norm_slang_text = normalize_slang(cleaned_text, slang_dict)

    # Step 2: Extract TF-IDF features
    tfidf_matrix = _tfidf_vectorizer.transform([norm_slang_text])

    # Step 3: Predict the sentiment
    sentiment = _sentiment_model.predict(tfidf_matrix)[0]  # Take the first element of the prediction

    # Step 4: Map the class index to a sentiment label
    labels = {0: "Negatif", 1: "Netral", 2: "Positif"}
    sentiment_label = labels[sentiment]

    # Step 5: Pick an emoticon based on the sentiment label
    emoticons = {"Negatif": "😞", "Netral": "😐", "Positif": "😄"}
    emoticon = emoticons.get(sentiment_label, "😐")  # Default emoticon for unknown labels

    return sentiment_label, emoticon
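
# Illustrative usage (assumes the vectorizer, model, and slang lexicon loaded in the Results tab below):
#   label, emoticon = predict_sentiment("busnya nyaman banget", sentiment_model, tfidf_vectorizer, lookp_dict)
#   -> e.g. ("Positif", "😄"), depending on the model's prediction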
@st.cache_data
def buat_chart(df, target_year):
    target_year = int(target_year)
    st.write(f"Bar Chart Tahun {target_year}:")

    # Extract month and year
    df['Date'] = pd.to_datetime(df['Date'])  # Convert the 'Date' column to datetime
    df['month'] = df['Date'].dt.month
    df['year'] = df['Date'].dt.year

    # Filter the DataFrame for the desired year (copy to avoid SettingWithCopyWarning)
    df_filtered = df[df['year'] == target_year].copy()

    # Check whether data for the target year is available
    if df_filtered.empty:
        st.warning(f"Tidak ada data untuk tahun {target_year}.")
        return

    # Map month numbers to month names
    bulan_mapping = {
        1: f'Januari {target_year}',
        2: f'Februari {target_year}',
        3: f'Maret {target_year}',
        4: f'April {target_year}',
        5: f'Mei {target_year}',
        6: f'Juni {target_year}',
        7: f'Juli {target_year}',
        8: f'Agustus {target_year}',
        9: f'September {target_year}',
        10: f'Oktober {target_year}',
        11: f'November {target_year}',
        12: f'Desember {target_year}'
    }

    # Replace the values in the 'month' column using the mapping
    df_filtered['month'] = df_filtered['month'].replace(bulan_mapping)

    # Fixed colors for each category in the 'label' column
    warna_label = {
        'Negatif': '#FF9AA2',
        'Netral': '#FFDAC1',
        'Positif': '#B5EAD7'
    }

    # Sort the unique labels so the colors line up with the unstacked columns
    unique_label = sorted(df_filtered['label'].unique())

    # Ensure the months appear in calendar order
    months_order = [
        f'Januari {target_year}', f'Februari {target_year}', f'Maret {target_year}', f'April {target_year}', f'Mei {target_year}', f'Juni {target_year}',
        f'Juli {target_year}', f'Agustus {target_year}', f'September {target_year}', f'Oktober {target_year}', f'November {target_year}', f'Desember {target_year}'
    ]

    # Sort the DataFrame by the custom month order
    df_filtered['month'] = pd.Categorical(df_filtered['month'], categories=months_order, ordered=True)
    df_filtered = df_filtered.sort_values('month')

    # Create a stacked bar chart with manual colors
    st.bar_chart(
        df_filtered.groupby(['month', 'label']).size().unstack().fillna(0),
        color=[warna_label[label] for label in unique_label]
    )
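
# Illustrative usage (assumes a results DataFrame with 'Date' and 'label' columns, as built in the Results tab):
#   buat_chart(df_results, "2022")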
@st.cache_data(show_spinner='In progress, please wait...')
def all_data_process(texts, df, lookp_dict, stop_words, _sentiment_model, _tfidf_vectorizer):
    results = []
    analisis = False
    if 'Text' in df.columns:
        # Pair each text with its date when a 'Date' column is present
        dates = df['Date'] if 'Date' in df.columns else [None] * len(texts)
        for text, date in zip(texts, dates):
            sentiment_label, emoticon = predict_sentiment(text, _sentiment_model, _tfidf_vectorizer, lookp_dict)
            cleaned_text = clean_text(text)
            norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
            tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words)

            result_entry = {
                'Text': text,
                'cleaned-text': cleaned_text,
                'normalisasi-text': norm_slang_text,
                'stopwords-remove': tanpa_stopwords,
                'label': sentiment_label,
                'emotikon': emoticon,
            }
            if date is not None:
                result_entry = {'Date': date, **result_entry}
            results.append(result_entry)
        analisis = bool(results)
    else:
        st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.")
    return results, analisis
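
# Illustrative return shape (values hypothetical; actual labels come from the model):
#   ([{'Date': ..., 'Text': ..., 'label': 'Positif', 'emotikon': '😄', ...}, ...], True)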
# Function to build a download link
def get_table_download_link(df, download_format):
    if download_format == "XLSX":
        # A relative href to a server-side file does not work in Streamlit,
        # so embed the workbook as a base64 data URI instead
        buffer = BytesIO()
        df.to_excel(buffer, index=False)
        b64 = b64encode(buffer.getvalue()).decode()
        return f'<a href="data:application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;base64,{b64}" download="hasil_sentimen.xlsx">Unduh File XLSX</a>'
    else:
        csv = df.to_csv(index=False)
        return f'<a href="data:file/csv;base64,{b64encode(csv.encode()).decode()}" download="hasil_sentimen.csv">Unduh File CSV</a>'
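
# Illustrative usage (hypothetical; the app itself uses st.download_button instead):
#   st.markdown(get_table_download_link(df_results, "CSV"), unsafe_allow_html=True)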
# Title
st.image('https://github.com/naufalnashif/sentiment-analysis-biskita/blob/main/assets/stk_logo-2.jpg?raw=true')
st.title("Sentiment Analysis : Based on Tweets Biskita Transpakuan Bogor 2022-2023")
preference_barchart_date = False
#-----------------------------------------------------General Settings---------------------------------------------------------------
with st.sidebar:
    st.subheader('Settings :')
    with st.expander("General Settings :"):
        # Widget for choosing the sentiment model
        selected_model = st.selectbox("Pilih Model Sentimen:", ("Ensemble", "Naive Bayes", "Logistic Regression", "Transformer"))

        # Choose between manual text input and an XLSX file
        input_option = st.radio("Pilih metode input:", ("Teks Manual", "Unggah Berkas XLSX"))

        if input_option == "Teks Manual":
            # Text input from the user
            user_input = st.text_area("Masukkan teks:", "")
        else:
            # XLSX file input
            uploaded_file = st.file_uploader("Unggah berkas XLSX", type=["xlsx"])
            st.caption("Pastikan berkas XLSX Anda memiliki kolom yang bernama :blue[Text] _(Maks.10000 data)_.")
            st.caption("Jika terdapat kolom type :blue[datetime], ganti nama kolom menjadi :blue[Date]")

            if uploaded_file is not None:
                df = pd.read_excel(uploaded_file)
                df = df[:10000]
                if 'Text' not in df.columns:
                    st.warning("Berkas XLSX harus memiliki kolom bernama 'Text' untuk analisis sentimen.")
                elif df['Text'].empty:
                    st.warning("Kolom 'Text' harus mempunyai value.")
                else:
                    texts = df['Text']  # Adjust to the column name in your XLSX file
                    if "Date" in df.columns and not df['Date'].empty:
                        dates = df['Date']
                        preference_barchart_date = True
    #-----------------------------------------------------Preference Settings--------------------------------------------------
    with st.expander("Preference Settings:"):
        colormap = st.selectbox("Pilih Warna Wordclouds :", ["Greys", "Purples", "Blues", "Greens", "Oranges", "Reds", "YlOrBr", "YlOrRd", "OrRd", "PuRd", "RdPu", "BuPu", "GnBu", "PuBu", "YlGnBu", "PuBuGn", "BuGn", "YlGn"])
        if preference_barchart_date:
            bar = st.selectbox("Pilih Tampilan Bar Chart :", ("Distribusi Kelas", "Distribusi Kelas Berdasarkan Waktu"), index=0)
            df_target_year = df['Date'].astype(str)
            target_year = st.selectbox("Pilih Tahun Bar Chart :", df_target_year.str[:4].unique())
    st.info('Tekan "Analysis" kembali jika tampilan menghilang', icon='ℹ️')
    button = st.button("Analysis")
tab1, tab2, tab3, tab4 = st.tabs(["πŸ“‹ Documentation", "πŸ“ˆ Results", "🀡 Creator", "πŸ” More"])
with tab1:
    @st.cache_resource
    def tab_1():
        st.header("Documentation:")
        '''
        Langkah-langkah :

        1. Buka sidebar sebelah kiri
        2. Buka General Settings
        3. Pilih Model
        4. Pilih metode input ('Teks Manual', 'Unggah Berkas XLSX')
            - Input manual dapat berisi banyak baris; tekan 'enter' untuk menambah baris baru
        5. File xlsx harus memiliki kolom 'Text'
        6. Kolom bertipe datetime "%Y-%m-%d %H:%M:%S" harus bernama 'Date', untuk mengaktifkan fitur tambahan
        7. Buka Preference Settings untuk menyetel tampilan Wordclouds/Barchart
        8. Klik Analysis
        9. Buka tab Results
        '''
        st.write('Data bisa dicari di sini:')
        more1, more2, more3 = st.columns(3)
        with more1:
            st.image('playstore.png', caption='Scraping Playstore Reviews')
            more1_link = "https://huggingface.co/spaces/naufalnashif/scraping-playstore-reviews"
            st.markdown(f"[{more1_link}]({more1_link})")
        with more2:
            st.image('News.png', caption='Scraping News Headline')
            more2_link = "https://huggingface.co/spaces/naufalnashif/scraping-news-headline"
            st.markdown(f"[{more2_link}]({more2_link})")
        with more3:
            st.image('Ecommerce.png', caption='Scraping Ecommerce Product')
            more3_link = "https://huggingface.co/spaces/naufalnashif/scraping-ecommerce-2023"
            st.markdown(f"[{more3_link}]({more3_link})")
    tab_1()
with tab2:
    st.header("Results:")
    kamus_path = '_json_colloquial-indonesian-lexicon (1).txt'
    kamus_sendiri_path = 'kamus_gaul_custom.txt'
    tfidf_model_path = 'X_tfidf_model.joblib'
    lookp_dict, stop_words, tfidf_vectorizer, model_ensemble, model_nb, model_lr = load_file(kamus_path, kamus_sendiri_path, tfidf_model_path)
    sentiment_model = select_sentiment_model(selected_model, model_ensemble, model_nb, model_lr)
    # Sentiment analysis
    results = []
    analisis = False

    if input_option == "Teks Manual" and user_input:
        if button:
            # Split the user's input into separate lines
            user_texts = user_input.split('\n')
            for text in user_texts:
                sentiment_label, emoticon = predict_sentiment(text, sentiment_model, tfidf_vectorizer, lookp_dict)
                cleaned_text = clean_text(text)
                norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
                tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words)

                results.append({
                    'Text': text,
                    'cleaned-text': cleaned_text,
                    'normalisasi-text': norm_slang_text,
                    'stopwords-remove': tanpa_stopwords,
                    'label': sentiment_label,
                    'emotikon': emoticon,
                })
            analisis = True
    elif input_option == "Unggah Berkas XLSX" and uploaded_file is not None:
        if button and 'Text' in df.columns and not df['Text'].empty:
            results, analisis = all_data_process(texts, df, lookp_dict, stop_words, sentiment_model, tfidf_vectorizer)
    if results and analisis:
        df_results = pd.DataFrame(results)

        # Split the layout into two columns
        columns = st.columns(2)

        # First column: word cloud
        with columns[0]:
            st.write("Wordclouds:")
            all_texts = [result['stopwords-remove'] for result in results if result['stopwords-remove'] is not None and not pd.isna(result['stopwords-remove'])]
            all_texts = " ".join(all_texts)

            if all_texts:
                wordcloud = WordCloud(width=800, height=660, background_color='white',
                                      colormap=colormap,      # Font colors
                                      contour_color='black',  # Contour color
                                      contour_width=2,        # Contour width
                                      mask=None,              # Use a mask for custom shapes
                                      ).generate(all_texts)
                st.image(wordcloud.to_array())
            else:
                st.write("Tidak ada data untuk ditampilkan dalam Word Cloud.")

        # Second column: bar chart
        if 'Date' in df_results.columns and bar == "Distribusi Kelas Berdasarkan Waktu":
            if not df_results['Date'].empty:
                with columns[1]:
                    buat_chart(df_results, target_year)
        else:
            with columns[1]:
                st.write("Bar Chart :")
                # Class-distribution bar chart
                st.bar_chart(
                    df_results["label"].value_counts()
                )

        # Show the sentiment analysis results in an expandable box
        with st.expander("Hasil Analisis Sentimen"):
            st.write(df_results)

        # Offer the results as a CSV download
        csv = df_results.to_csv(index=False)
        st.download_button(label="Unduh CSV", data=csv, key="csv_download", file_name="hasil_sentimen.csv")
    else:
        st.write("Tidak ada data untuk ditampilkan")
with tab3:
    @st.cache_resource
    def tab_3():
        st.header("Profile:")
        st.image('https://github.com/naufalnashif/naufalnashif.github.io/blob/main/assets/img/my-profile-semhas.jpeg?raw=true', caption='Naufal Nashif')
        st.subheader('Hello, nice to meet you !')

        # Link to GitHub
        github_link = "https://github.com/naufalnashif/"
        st.markdown(f"GitHub: [{github_link}]({github_link})")

        # Link to Instagram
        instagram_link = "https://www.instagram.com/naufal.nashif/"
        st.markdown(f"Instagram: [{instagram_link}]({instagram_link})")

        # Link to Website
        website_link = "https://naufalnashif.netlify.app/"
        st.markdown(f"Website: [{website_link}]({website_link})")
    tab_3()
with tab4:
    @st.cache_resource
    def tab_4():
        st.header("More:")
        more1, more2, more3 = st.columns(3)
        with more1:
            st.image('playstore.png', caption='Scraping Playstore Reviews')
            more1_link = "https://huggingface.co/spaces/naufalnashif/scraping-playstore-reviews"
            st.markdown(f"[{more1_link}]({more1_link})")
        with more2:
            st.image('News.png', caption='Scraping News Headline')
            more2_link = "https://huggingface.co/spaces/naufalnashif/scraping-news-headline"
            st.markdown(f"[{more2_link}]({more2_link})")
        with more3:
            st.image('Ecommerce.png', caption='Scraping Ecommerce Product')
            more3_link = "https://huggingface.co/spaces/naufalnashif/scraping-ecommerce-2023"
            st.markdown(f"[{more3_link}]({more3_link})")
    tab_4()
# Divider
st.divider()
st.write('Thank you for trying the demo!')
st.caption('Best regards, Naufal Nashif :sunglasses: | ©️ 2023')