import streamlit as st
import pandas as pd
from google_play_scraper import Sort, reviews, reviews_all
import re
from datetime import datetime
import pytz
#---------------------------------------------func----------------------------------
@st.cache_data
def get_url_by_app_name(nama_apl):
    """
    Return the Play Store URL for an app, looked up by name.

    Parameters:
    - nama_apl (str): Name of the app to look up.

    Returns:
    - str or None: The app URL, or None if the name is not found.
    """
    aplikasi_dict = {
        'Shopee': 'https://play.google.com/store/apps/details?id=com.shopee.id',
        'Tokopedia': 'https://play.google.com/store/apps/details?id=com.tokopedia.tkpd',
        'Amazon': 'https://play.google.com/store/apps/details?id=com.amazon.mShop.android.shopping',
        'Grab': 'https://play.google.com/store/apps/details?id=com.grabtaxi.passenger'
    }
    return aplikasi_dict.get(nama_apl, None)
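# A usage sketch (hypothetical calls, not executed by the app):
#   get_url_by_app_name('Shopee')
#   -> 'https://play.google.com/store/apps/details?id=com.shopee.id'
#   get_url_by_app_name('Bukalapak')  # -> None (not in the dictionary)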
@st.cache_data
def extract_app_id(play_store_url):
    # Regular-expression pattern that captures the app ID from the URL query string
    pattern = r'id=([a-zA-Z0-9._]+)'
    # Search for the pattern in the URL
    match = re.search(pattern, play_store_url)
    # Return the app ID if a match was found, otherwise None
    if match:
        return match.group(1)
    return None
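# Expected behaviour, for illustration:
#   extract_app_id('https://play.google.com/store/apps/details?id=com.shopee.id')
#   -> 'com.shopee.id'
#   extract_app_id('https://play.google.com/store')  # -> None (no id= parameter)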
@st.cache_data(show_spinner='In progress, please wait...')
def scraping_func(app_id, bahasa, negara, filter_score, jumlah):
    # "Semua Rating" (all ratings) means no score filter
    filter_score = None if filter_score == "Semua Rating" else filter_score
    rws, token = reviews(
        app_id,
        lang=bahasa,
        country=negara,
        sort=Sort.NEWEST,
        filter_score_with=filter_score,
        count=jumlah
    )
    scraping_done = bool(rws)
    return rws, token, scraping_done
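# A minimal pagination sketch (illustrative only; the app fetches a single
# batch): `reviews` also accepts the returned token as `continuation_token`
# to resume where the previous call stopped.
#   batch1, token = reviews('com.shopee.id', count=100)
#   batch2, token = reviews('com.shopee.id', count=100, continuation_token=token)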
@st.cache_data(show_spinner='In progress, please wait...')
def scraping_all_func(app_id, bahasa, negara, filter_score, sleep=0):
    # "Semua Rating" (all ratings) means no score filter
    filter_score = None if filter_score == "Semua Rating" else filter_score
    rws = reviews_all(
        app_id,
        sleep_milliseconds=sleep,  # pause between batches; defaults to 0
        lang=bahasa,
        country=negara,
        filter_score_with=filter_score,
    )
    scraping_done = bool(rws)
    return rws, scraping_done
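# Each element returned by reviews()/reviews_all() is a dict; the fields this
# app relies on ('score' and 'at') look roughly like (values illustrative):
#   {'reviewId': '...', 'userName': '...', 'content': '...',
#    'score': 5, 'at': datetime(2023, 11, 1, 10, 30), ...}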
@st.cache_data
def buat_chart(df, target_year):
    st.write(f"Bar Chart Tahun {target_year}:")
    # Extract month and year from the review timestamp
    df['at'] = pd.to_datetime(df['at'])  # Convert 'at' column to datetime
    df['month'] = df['at'].dt.month
    df['year'] = df['at'].dt.year
    # Filter the DataFrame for the desired year; copy() avoids pandas'
    # SettingWithCopyWarning on the column assignments below
    df_filtered = df[df['year'] == target_year].copy()
    # Check if data for the target year is available
    if df_filtered.empty:
        st.warning(f"Tidak ada data untuk tahun {target_year}.")
        return
    # Map month numbers to month names
    bulan_mapping = {
        1: f'Januari {target_year}',
        2: f'Februari {target_year}',
        3: f'Maret {target_year}',
        4: f'April {target_year}',
        5: f'Mei {target_year}',
        6: f'Juni {target_year}',
        7: f'Juli {target_year}',
        8: f'Agustus {target_year}',
        9: f'September {target_year}',
        10: f'Oktober {target_year}',
        11: f'November {target_year}',
        12: f'Desember {target_year}'
    }
    # Replace numeric values in the 'month' column using the mapping
    df_filtered['month'] = df_filtered['month'].replace(bulan_mapping)
    # Assign a color to each category in the 'score' column
    warna_score = {
        1: '#FF9AA2',
        2: '#FFB7B2',
        3: '#FFDAC1',
        4: '#E2F0CB',
        5: '#B5EAD7'
    }
    # Sort the unique scores so the colors line up with the stacked columns
    unique_scores = sorted(df_filtered['score'].unique())
    # Ensure months are in the correct order
    months_order = [
        f'Januari {target_year}', f'Februari {target_year}', f'Maret {target_year}',
        f'April {target_year}', f'Mei {target_year}', f'Juni {target_year}',
        f'Juli {target_year}', f'Agustus {target_year}', f'September {target_year}',
        f'Oktober {target_year}', f'November {target_year}', f'Desember {target_year}'
    ]
    # Sort the DataFrame based on the custom month order
    df_filtered['month'] = pd.Categorical(df_filtered['month'], categories=months_order, ordered=True)
    df_filtered = df_filtered.sort_values('month')
    # Create a stacked bar chart with manual colors
    st.bar_chart(
        df_filtered.groupby(['month', 'score']).size().unstack().fillna(0),
        color=[warna_score[score] for score in unique_scores]
    )
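# Shape of the table fed to st.bar_chart above: unstack() pivots the scores
# into columns, one stacked segment per score (numbers illustrative):
#   score            1    2    5
#   month
#   Januari 2023    12    4   30
#   Februari 2023    7    9   22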
# Take the current UTC time and convert it to Jakarta time (WIB)
utc_timezone = pytz.timezone('UTC')
datetime_utc = datetime.now(utc_timezone)
wib_timezone = pytz.timezone('Asia/Jakarta')
dateNow = datetime_utc.astimezone(wib_timezone)
dateSimple = dateNow.strftime("%A, %d %b %Y")
timeNow = dateNow.strftime("%H:%M WIB")
yearNow = dateNow.strftime("%Y")
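# Illustrative output of the format strings above:
#   dateSimple -> 'Monday, 01 Jan 2024'
#   timeNow    -> '14:30 WIB'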
#--------------------------------------------UI---------------------------------------
# Streamlit UI
st.title("Data Everywhere : Scraping Playstore Reviews")
scraping_done = False
with st.sidebar:
    st.text(f"Today\t: {dateSimple}")
    st.text(f"Time\t: {timeNow}")
    with st.expander("Scraping Settings :"):
        scrape = st.selectbox("Pilih Metode :", ("Semua Reviews", "Estimasi Data"), index=1)
        aplikasi = st.radio(
            "Pilih Input :",
            ["Defaults", "Custom URL"], index=0,
            captions=["Shopee, Tokopedia, Amazon, Grab", "Tambahkan URL Manual"])
        if aplikasi == "Defaults":
            nama_apl = st.selectbox("Pilih Aplikasi :", ('Shopee', 'Tokopedia', 'Amazon', 'Grab'))
            if nama_apl:
                url = get_url_by_app_name(nama_apl)
        elif aplikasi == "Custom URL":
            url = st.text_input("Masukkan URL Aplikasi Pada Web Playstore :", 'https://play.google.com/store/apps/details?id=com.shopee.id')
        if scrape == "Estimasi Data":
            jumlah = st.number_input("Masukkan Estimasi Banyak Data :", min_value=10, max_value=25000, step=10, placeholder="Type a number...")
    with st.expander("Preference Settings :"):
        if scrape == "Semua Reviews":
            sleep = st.number_input("Masukkan sleep (milisecond) :", min_value=1, max_value=1000, step=10, placeholder="Type a number...")
        bahasa = st.selectbox("Pilih Bahasa:", ('en', 'id'))
        negara = st.selectbox("Pilih Negara :", ('us', 'id'))
        filter_score = st.selectbox("Pilih Rating :", ('Semua Rating', 1, 2, 3, 4, 5))
        target_year = st.selectbox("Pilih Tahun Bar Chart :", (2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025), index=7)
        download_format = st.selectbox("Pilih Format Unduhan :", ["XLSX", "CSV", "JSON"])
    st.info('Tekan "Mulai Scraping" kembali jika tampilan menghilang', icon="ℹ️")
    if url and bahasa and negara and filter_score and download_format:
        if st.button("Mulai Scraping"):
            app_id = extract_app_id(url)
            if scrape == "Semua Reviews":
                # Store the result under a new name so the imported
                # `reviews` function is not shadowed
                hasil_reviews, scraping_done = scraping_all_func(app_id, bahasa, negara, filter_score, sleep)
                df = pd.DataFrame(hasil_reviews)
            elif scrape == "Estimasi Data":
                hasil_reviews, token, scraping_done = scraping_func(app_id, bahasa, negara, filter_score, jumlah)
                df = pd.DataFrame(hasil_reviews)
            else:
                st.warning("Masukkan pilihan yang valid")
    else:
        st.error("Mohon Masukkan Parameter.")
tab1, tab2, tab3, tab4 = st.tabs(["📋 User Guide", "📈 Results", "🤵 Creator", "🔍 More"])
with tab1:
    @st.cache_resource
    def tab_1():
        st.header("User Guide:")
        '''
        Steps:
        1. Open the sidebar on the left
        2. Open "Scraping Settings"
        3. Be careful with "Semua Reviews": it can return millions of rows
        4. Enter the app URL from the Play Store website
        5. Set the language, country, and rating to scrape
        6. Choose the year for the bar chart
        7. Choose the download format
        8. Click "Mulai Scraping"
        9. Open the Results tab
        '''
    tab_1()
#-------------------------------------------BE----------------------------------------
with tab2:
    st.header("Results:")
    if scraping_done:
        with st.expander(f"Hasil Scraping {app_id}:"):
            buat_chart(df, target_year)
            st.write(df)
            if download_format == "XLSX":
                # Clean the data to remove non-printable characters that Excel
                # rejects (applymap is deprecated in pandas >= 2.1 in favor of
                # DataFrame.map, but is kept here for older pandas versions)
                cleaned_data = df.applymap(lambda x: "".join(char for char in str(x) if char.isprintable()))
                # Save the cleaned data to Excel (requires openpyxl)
                cleaned_data.to_excel(f"hasil_scraping_{app_id}.xlsx", index=False)
                # Provide the download button for the cleaned Excel file
                st.download_button(label=f"Unduh XLSX ({len(hasil_reviews)} data)", data=open(f"hasil_scraping_{app_id}.xlsx", "rb").read(), key="xlsx_download", file_name=f"hasil_scraping_{app_id}.xlsx")
            elif download_format == "CSV":
                csv = df.to_csv(index=False)
                # Provide the download button for the CSV file
                st.download_button(label=f"Unduh CSV ({len(hasil_reviews)} data)", data=csv, key="csv_download", file_name=f"hasil_scraping_{app_id}.csv")
            elif download_format == "JSON":
                json_data = df.to_json(orient="records")
                # Provide the download button for the JSON file
                st.download_button(label=f"Unduh JSON ({len(hasil_reviews)} data)", data=json_data, key="json_download", file_name=f"hasil_scraping_{app_id}.json")
    else:
        st.info("Tidak ada data")
with tab3:
    @st.cache_resource
    def tab_3():
        st.header("Profile:")
        st.image('https://raw.githubusercontent.com/naufalnashif/naufalnashif.github.io/main/assets/img/my-profile-sidang-idCard-crop.JPG', caption='Naufal Nashif')
        st.subheader('Hello, nice to meet you !')
        # Link to GitHub
        github_link = "https://github.com/naufalnashif/"
        st.markdown(f"GitHub: [{github_link}]({github_link})")
        # Link to Instagram
        instagram_link = "https://www.instagram.com/naufal.nashif/"
        st.markdown(f"Instagram: [{instagram_link}]({instagram_link})")
        # Link to Website
        website_link = "https://naufalnashif.netlify.app/"
        st.markdown(f"Website: [{website_link}]({website_link})")
    tab_3()
with tab4:
    @st.cache_resource
    def tab_4():
        st.header("More:")
        more1, more2, more3 = st.columns(3)
        with more1:
            st.image('https://raw.githubusercontent.com/naufalnashif/huggingface-repo/main/assets/img/sentiment-analysis-biskita.png', caption='Sentiment Analysis Web App')
            more1_link = "https://huggingface.co/spaces/naufalnashif/sentiment-analysis-ensemble-model"
            st.markdown(f"[{more1_link}]({more1_link})")
        with more2:
            st.image('https://raw.githubusercontent.com/naufalnashif/huggingface-repo/main/assets/img/scraping-news-headline.png', caption='Scraping News Headline')
            more2_link = "https://huggingface.co/spaces/naufalnashif/scraping-news-headline"
            st.markdown(f"[{more2_link}]({more2_link})")
        with more3:
            st.image('https://raw.githubusercontent.com/naufalnashif/huggingface-repo/main/assets/img/scraping-ecommerce.png', caption='Scraping Ecommerce Product')
            more3_link = "https://huggingface.co/spaces/naufalnashif/scraping-ecommerce-2023"
            st.markdown(f"[{more3_link}]({more3_link})")
    tab_4()
# Divider line
st.divider()
st.write('Thank you for trying the demo!')
st.caption(f'Made with ❤️ by :blue[Naufal Nashif] ©️ {yearNow}')