#---------------------------------------------------Requirements----------------------------------------------------------------------
import streamlit as st
import pandas as pd
import numpy as np
import re
import json
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
import requests
from bs4 import BeautifulSoup
from datetime import date
import time
from collections import Counter
import nltk
from nltk.corpus import stopwords
#---------------------------------------------------Scraping Function----------------------------------------------------------------------
@st.cache_data
def scrape_cnbc_data(query, date, jumlah):
    data = []
    page = 1
    progress_text = "Scraping in progress. Please wait."
    my_bar = st.progress(len(data), text=progress_text)

    # Walk through search-result pages until enough articles have been collected.
    for _ in range(jumlah):
        if len(data) >= jumlah:
            data = data[:jumlah]
            break
        prop = min(len(data) / jumlah, 1)
        my_bar.progress(prop, text=progress_text)

        base_url = f"https://www.cnbcindonesia.com/search?query={query}&p={page}&kanal=&tipe=artikel&date={date}"
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
        }
        response = requests.get(base_url, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        articles = soup.find_all('article')
        if not articles:
            break

        for article in articles:
            title = article.find('h2').text.strip()
            link = article.find('a')['href']
            # Use a separate name so the 'date' search parameter is not overwritten.
            article_date = article.find('span', class_='date').text.strip()
            data.append({
                'date': article_date,
                'judul-berita': title,
                'link-berita': link,
            })
        page += 1
        time.sleep(1)

    my_bar.empty()
    return data
@st.cache_data
def scrape_detik_news(query, jumlah):
    site_id = 2
    data = []
    page = 1
    progress_text = "Scraping in progress. Please wait."
    my_bar = st.progress(len(data), text=progress_text)

    for _ in range(jumlah):
        if len(data) >= jumlah:
            data = data[:jumlah]
            break
        prop = min(len(data) / jumlah, 1)
        my_bar.progress(prop, text=progress_text)

        base_url = "https://www.detik.com/search/searchall"
        params = {
            "query": query,
            "siteid": site_id,
            "page": page
        }
        response = requests.get(base_url, params=params)
        soup = BeautifulSoup(response.content, "html.parser")
        articles = soup.find_all("article")
        if not articles:
            break

        for article in articles:
            article_date = article.find("span", class_="date").text.strip()
            title = article.find("h2", class_="title").text.strip()
            link = article.find("a")["href"]
            data.append({"date": article_date, "judul-berita": title, "link-berita": link})
        page += 1
        time.sleep(1)

    my_bar.empty()
    return data
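# Illustrative only (an assumption, not executed here): both scrapers return a list of
# dicts with the same keys, e.g.
#   scrape_cnbc_data("ihsg", "2023/11/01", 5)
#   -> [{'date': '...', 'judul-berita': '...', 'link-berita': 'https://...'}, ...]
# The exact 'date' string depends on each site's markup and may change if the pages do.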
#---------------------------------------------------Data Cleaning (RegEx)----------------------------------------------------------------------
def clean_text(text):
    # Step 1: remove non-ASCII characters
    text = re.sub(r'[^\x00-\x7F]+', '', text)
    # Step 2: remove URLs
    text = re.sub(r'http[s]?://.[a-zA-Z0-9./_?=%&#+!]+', '', text)
    text = re.sub(r'pic.twitter.com?.[a-zA-Z0-9./_?=%&#+!]+', '', text)
    # Step 3: remove mentions
    text = re.sub(r'@[\w]+', '', text)
    # Step 4: remove hashtags
    text = re.sub(r'#([\w]+)', '', text)
    # Step 5: remove the HTML entities '&amp;' and '&gt;' (and any leftover '&' or '>')
    text = re.sub(r'&amp;|&gt;|&|>', '', text)
    # Step 6: remove special characters (symbols)
    text = re.sub(r'[!$%^&*@#()_+|~=`{}\[\]%\-:";\'<>?,./]', '', text)
    # Step 7: remove digits
    text = re.sub(r'[0-9]+', '', text)
    # Step 8: collapse multiple spaces into a single space
    text = re.sub(' +', ' ', text)
    # Step 9: strip leading and trailing whitespace
    text = text.strip()
    # Step 10: lowercase the text
    text = text.lower()
    # Step 11: collapse characters repeated three or more times (e.g. 'yukkk' -> 'yuk')
    text = re.sub(r'(\w)\1{2,}', r'\1', text)
    return text
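# Illustrative only (an assumption about typical input, not executed):
#   clean_text("Harga BBM Naik!!! Cek https://t.co/xyz #ekonomi @cnbc 2023")
#   -> "harga bbm naik cek"
# URLs, hashtags, mentions, digits and symbols are stripped and the rest is lowercased.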
#---------------------------------------------------Normalization----------------------------------------------------------------------
# Load Salsabila's colloquial Indonesian lexicon
kamus_path = '_json_colloquial-indonesian-lexicon.txt'  # Replace with the correct path if needed
with open(kamus_path) as f:
    data = f.read()
lookp_dict = json.loads(data)

# My own slang dictionary for words missing from Salsabila's lexicon
kamus_sendiri_path = 'kamus_gaul_custom.txt'
with open(kamus_sendiri_path) as f:
    kamus_sendiri = f.read()
kamus_gaul_baru = json.loads(kamus_sendiri)

# Merge the new slang entries into the existing dictionary
lookp_dict.update(kamus_gaul_baru)

# Slang normalization function
def normalize_slang(text, slang_dict):
    words = text.split()
    normalized_words = [slang_dict.get(word, word) for word in words]
    return ' '.join(normalized_words)
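# Illustrative only — the real keys come from the lexicon files loaded above (an assumption):
#   normalize_slang("gmn kabar lo", {"gmn": "bagaimana", "lo": "kamu"})
#   -> "bagaimana kabar kamu"
# Words not present in the dictionary are passed through unchanged.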
#---------------------------------------------------NLTK Remove Stopwords----------------------------------------------------------------------
# Initialize the Indonesian stopword list
nltk.download("stopwords")
stop_words = set(stopwords.words("indonesian"))

def remove_stopwords(text, stop_words):
    # Split the text into words
    words = text.split()
    # Drop Indonesian stopwords
    words = [word for word in words if word not in stop_words]
    return " ".join(words)
#---------------------------------------------------User Interface----------------------------------------------------------------------
# Streamlit UI
st.title("Aplikasi Web Scraping CNBC / Detik.com & Explorasi Data")

# Choose which site to scrape
selected_site = st.selectbox("Pilih Situs Web :", ["CNBC Indonesia", "Detik.com"])
query = st.text_input("Masukkan Query :")
jumlah = st.number_input("Masukkan Estimasi Banyak Data :", min_value=1, step=1, placeholder="Type a number...")
date = date.today()
download_format = st.selectbox("Pilih Format Unduhan :", ["XLSX", "CSV", "JSON", "TXT"])
st.info('Tekan "Mulai Scraping" kembali jika tampilan menghilang ', icon="ℹ️")

# Hidden variables holding the scraping results
hidden_data = []
scraping_done = False  # Flag marking whether scraping has finished
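# Note: hidden_data and scraping_done are plain module-level variables, so Streamlit resets
# them on every rerun (hence the st.info hint above). A minimal sketch of persisting results
# across reruns with st.session_state instead (an assumption, not wired into this app):
#     if "hidden_data" not in st.session_state:
#         st.session_state["hidden_data"] = []
#     # ...after scraping: st.session_state["hidden_data"] = data_df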
#---------------------------------------------------CNBC Indonesia----------------------------------------------------------------------
if selected_site == "CNBC Indonesia":
    if st.button("Mulai Scraping"):
        if not query:
            st.error("Mohon isi query.")
        else:
            data_df = scrape_cnbc_data(query, date.strftime("%Y/%m/%d"), jumlah)
            hidden_data = data_df  # Store the results in the hidden variable
            scraping_done = True   # Mark scraping as done

            #---------------------------------------------------Data Exploration--------------------------------------------------
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            texts = df["judul-berita"]

            # Initialize results
            results = []

            # Process the text data
            for text in texts:
                cleaned_text = clean_text(text)
                norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
                tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words)
                results.append((text, cleaned_text, norm_slang_text, tanpa_stopwords))

            # Split the layout into two columns
            columns = st.columns(2)

            # Initialize all_texts outside the `with columns[0]` block
            all_texts = ""

            # First column: Word Cloud
            with columns[0]:
                if results:
                    all_texts = [result[3] for result in results if result[3] is not None and not pd.isna(result[3])]
                    all_texts = " ".join(all_texts)

                    st.subheader("Word Cloud")
                    if all_texts:
                        wordcloud = WordCloud(width=800, height=660, background_color='white',
                                              colormap='Purples',
                                              contour_color='black',
                                              contour_width=2,
                                              mask=None).generate(all_texts)
                        st.image(wordcloud.to_array())
                    else:
                        st.write("Tidak ada data untuk ditampilkan.")

            # Second column: Most Common Words
            with columns[1]:
                st.subheader("Most Common Words")
                if all_texts:
                    word_counts = Counter(all_texts.split())
                    most_common_words = word_counts.most_common(5)
                    words, counts = zip(*most_common_words)

                    fig, ax = plt.subplots(figsize=(10, 6))
                    ax.bar(words, counts)
                    ax.set_xlabel("Kata-kata")
                    ax.set_ylabel("Jumlah")
                    ax.set_title("Kata-kata Paling Umum")
                    ax.tick_params(axis='x', rotation=45)
                    st.pyplot(fig)
                else:
                    st.write("Tidak ada data untuk ditampilkan dalam Word Cloud.")

            if not hidden_data:
                st.warning(f"Tidak ada data pada query '{query}'", icon="⚠️")
#---------------------------------------------------Detik.com----------------------------------------------------------------------
elif selected_site == "Detik.com":
    if st.button("Mulai Scraping"):
        if not query:
            st.error("Mohon isi query.")
        else:
            data_df = scrape_detik_news(query=query, jumlah=jumlah)
            hidden_data = data_df  # Store the results in the hidden variable
            scraping_done = True   # Mark scraping as done

            #---------------------------------------------------Data Exploration--------------------------------------------------
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            texts = df["judul-berita"]

            # Initialize results
            results = []

            # Process the text data
            for text in texts:
                cleaned_text = clean_text(text)
                norm_slang_text = normalize_slang(cleaned_text, lookp_dict)
                tanpa_stopwords = remove_stopwords(norm_slang_text, stop_words)
                results.append((text, cleaned_text, norm_slang_text, tanpa_stopwords))

            # Split the layout into two columns
            columns = st.columns(2)

            # Initialize all_texts outside the `with columns[0]` block
            all_texts = ""

            # First column: Word Cloud
            with columns[0]:
                if results:
                    all_texts = [result[3] for result in results if result[3] is not None and not pd.isna(result[3])]
                    all_texts = " ".join(all_texts)

                    st.subheader("Word Cloud")
                    if all_texts:
                        wordcloud = WordCloud(width=800, height=660, background_color='white',
                                              colormap='Purples',
                                              contour_color='black',
                                              contour_width=2,
                                              mask=None).generate(all_texts)
                        st.image(wordcloud.to_array())
                    else:
                        st.write("Tidak ada data untuk ditampilkan.")

            # Second column: Most Common Words
            with columns[1]:
                st.subheader("Most Common Words")
                if all_texts:
                    word_counts = Counter(all_texts.split())
                    most_common_words = word_counts.most_common(5)
                    words, counts = zip(*most_common_words)

                    fig, ax = plt.subplots(figsize=(10, 6))
                    ax.bar(words, counts)
                    ax.set_xlabel("Kata-kata")
                    ax.set_ylabel("Jumlah")
                    ax.set_title("Kata-kata Paling Umum")
                    ax.tick_params(axis='x', rotation=45)
                    st.pyplot(fig)
                else:
                    st.write("Tidak ada data untuk ditampilkan dalam Word Cloud.")

            if not hidden_data:
                st.warning(f"Tidak ada data pada query '{query}'", icon="⚠️")
#---------------------------------------------------Download File & Scraping Results----------------------------------------------------------------------
# Display the scraping results
if scraping_done:
    if hidden_data:
        if download_format == "XLSX":
            st.subheader("Hasil Scraping")
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            st.write(df)
            df.to_excel("hasil_scraping.xlsx", index=False)
            st.download_button(label=f"Unduh XLSX ({len(hidden_data)} data)", data=open("hasil_scraping.xlsx", "rb").read(), key="xlsx_download", file_name="hasil_scraping.xlsx")
        elif download_format == "CSV":
            st.subheader("Hasil Scraping")
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            st.write(df)
            csv = df.to_csv(index=False)
            st.download_button(label=f"Unduh CSV ({len(hidden_data)} data)", data=csv, key="csv_download", file_name="hasil_scraping.csv")
        elif download_format == "JSON":
            st.subheader("Hasil Scraping")
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            st.write(df)
            json_data = df.to_json(orient="records")
            st.download_button(label=f"Unduh JSON ({len(hidden_data)} data)", data=json_data, key="json_download", file_name="hasil_scraping.json")
        elif download_format == "TXT":
            st.subheader("Hasil Scraping")
            df = pd.DataFrame(hidden_data, columns=["date", "judul-berita", "link-berita"])
            st.write(df)
            text_data = "\n".join([f"{row['date']} - {row['judul-berita']} - {row['link-berita']}" for row in hidden_data])
            st.download_button(label=f"Unduh TXT ({len(hidden_data)} data)", data=text_data, key="txt_download", file_name="hasil_scraping.txt")

if not scraping_done:
    st.write("Tidak ada data untuk diunduh.")
st.divider()
github_link = "https://github.com/naufalnashif/"
st.markdown(f"GitHub: [{github_link}]({github_link})")
instagram_link = "https://www.instagram.com/naufal.nashif/"
st.markdown(f"Instagram: [{instagram_link}]({instagram_link})")
st.write('Terima kasih telah mencoba demo ini!')