import streamlit as st
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from webdriver_manager.core.os_manager import ChromeType
import time
import re
import pandas as pd
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
import io
import plotly.express as px
import zipfile
from streamlit_extras.stylable_container import stylable_container
# sidebar
with st.sidebar:
    with stylable_container(
        key="test_button",
        css_styles="""
            button {
                background-color: yellow;
                border: 1px solid black;
                padding: 5px;
                color: black;
            }
        """,
    ):
        st.button("DEMO APP")
    expander = st.expander("**Important notes on the Google Maps Reviews Sentiment Analysis App**")
    expander.write('''
**How to Use**

This app works with the URL of a Google Maps Reviews page. Paste the URL and press the 'Sentiment Analysis' button to perform sentiment analysis on the reviews.

**Usage Limits**

You can perform sentiment analysis on Google Maps Reviews up to 5 times.

**Subscription Management**

This demo app offers a one-day subscription, expiring after 24 hours. If you are interested in building your own Google Maps Reviews Sentiment Analysis Web App, we invite you to explore our NLP Web App Store on our website. You can select your desired features, place your order, and we will deliver your custom app within five business days. If you wish to delete your account with us, please contact us at info@nlpblogs.com.

**Customization**

To change the app's background color to white or black, click the three-dot menu on the right-hand side of the app, go to Settings, and then choose the app theme, colors and fonts.

**Charts**

Hover over the charts to interact with them and download them.

**File Handling and Errors**

The pie and bar charts are based only on reviews that contain text. For any errors or inquiries, please contact us at info@nlpblogs.com.
''')
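# Note: the tokenizer and model below are reloaded on every Streamlit rerun. If reruns feel slow,
# a cached loader is one option (sketch only, not wired in; assumes a recent Streamlit version):
#
#     @st.cache_resource
#     def load_sentiment_model():
#         tok = DistilBertTokenizer.from_pretrained("tabularisai/robust-sentiment-analysis")
#         mdl = DistilBertForSequenceClassification.from_pretrained("tabularisai/robust-sentiment-analysis")
#         return tok, mdl
#
#     tokenizer, model = load_sentiment_model()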
tokenizer = DistilBertTokenizer.from_pretrained("tabularisai/robust-sentiment-analysis")
model = DistilBertForSequenceClassification.from_pretrained("tabularisai/robust-sentiment-analysis")
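# The CSS selectors used below ('div.jftiEf', '.d4r55', '.wiI7pd', '.rsqaWe') are Google Maps'
# auto-generated class names at the time of writing. Google changes them periodically, so if
# scraping suddenly returns no reviews, these selectors are the first thing to re-check.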
def scroll_and_check_for_new_reviews(driver, current_review_count):
    """Scroll down the page and check whether new reviews have loaded."""
    try:
        review_cards = driver.find_elements(By.CSS_SELECTOR, 'div.jftiEf')
        if not review_cards:
            return False
        # Scroll the last loaded review into view to trigger lazy loading of the next batch.
        driver.execute_script("arguments[0].scrollIntoView(true);", review_cards[-1])
        time.sleep(3)  # Give the page time to load additional reviews.
        new_review_count = len(driver.find_elements(By.CSS_SELECTOR, 'div.jftiEf'))
        return new_review_count > current_review_count
    except Exception as e:
        st.error(f"Error during scrolling: {e}")
        return False
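# scrape_google_reviews returns (sentiment_df, result_df, pie_fig, bar_fig, scatter_fig);
# every element is None when scraping fails, which the UI below checks before rendering.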
def scrape_google_reviews(url):
    """Scrapes Google Maps reviews from the given URL and performs sentiment analysis."""
    try:
        # Run Chromium headless so the scraper works in a server environment without a display.
        options = Options()
        options.add_argument("--headless")
        options.add_argument("--disable-gpu")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        options.add_argument("--start-maximized")
        service = Service(ChromeDriverManager(chrome_type=ChromeType.CHROMIUM).install())
        driver = webdriver.Chrome(service=service, options=options)
        driver.get(url)
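        # Scroll repeatedly; the helper returns False once no new review cards load,
        # which means all available reviews are in the DOM.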
        current_review_count = 0
        while scroll_and_check_for_new_reviews(driver, current_review_count):
            current_review_count = len(driver.find_elements(By.CSS_SELECTOR, 'div.jftiEf'))

        reviews = driver.find_elements(By.CSS_SELECTOR, 'div.jftiEf')
        review_data = []
        for review_elem in reviews:
            try:
                reviewer_name = review_elem.find_element(By.CSS_SELECTOR, '.d4r55').text.strip()
            except Exception:
                reviewer_name = 'No name'
            try:
                review_text = review_elem.find_element(By.CSS_SELECTOR, '.wiI7pd').text.strip()
            except Exception:
                review_text = 'No review text'
            rating = 0
            try:
                reviews_element = review_elem.find_element(By.CSS_SELECTOR, "span[role='img']")
                reviews_text = reviews_element.get_attribute("aria-label")
                match = re.search(r'(\d+(?:\.\d+)?) stars', reviews_text)
                if match:
                    rating = float(match.group(1))
            except Exception:
                pass
            try:
                date_elem = review_elem.find_element(By.CSS_SELECTOR, '.rsqaWe')
                review_date = date_elem.text.strip()
            except Exception:
                review_date = 'No date'
            review_data.append({
                'reviewer_name': reviewer_name,
                'review_text': review_text,
                'rating': rating,
                'review_date': review_date,
            })
        driver.quit()
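        # Turn the scraped reviews into a DataFrame, drop placeholder rows without text
        # (the charts are based only on reviews that contain text), and classify the rest in one batch.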
        df1 = pd.DataFrame(review_data)
        df = df1[df1["review_text"] != "No review text"].reset_index(drop=True)  # reset index so rows align with sentiment_df
        if df.empty:
            st.warning("No reviews with text were found at this URL.")
            return None, None, None, None, None
        if tokenizer and model:
            inputs = tokenizer(df['review_text'].tolist(), return_tensors="pt", padding=True, truncation=True)
            with torch.no_grad():
                logits = model(**inputs).logits
            predicted_probabilities = torch.nn.functional.softmax(logits, dim=-1)
            predicted_labels = predicted_probabilities.argmax(dim=1)
            results = []
            for i, label in enumerate(predicted_labels):
                results.append({'Review Number': i + 1, 'Sentiment': model.config.id2label[label.item()]})
            sentiment_df = pd.DataFrame(results)

            value_counts1 = sentiment_df['Sentiment'].value_counts().rename_axis('Sentiment').reset_index(name='count')
            fig1 = px.pie(value_counts1, values='count', names='Sentiment', hover_data=['count'], labels={'count': 'count'})
            fig1.update_traces(textposition='inside', textinfo='percent+label')

            result = pd.concat([df, sentiment_df], axis=1)
            result['rating'] = result['rating'].astype(int)
            fig2 = px.bar(result, x='review_date', y='rating',
                          hover_data=['rating', 'review_date'], color='Sentiment',
                          labels={'Sentiment': 'Sentiment'}, height=400)
            fig3 = px.scatter(result, x='review_date', y='rating', color='rating')
            return sentiment_df, result, fig1, fig2, fig3
        else:
            return df, None, None, None, None
    except Exception as e:
        st.error(f"An error occurred: {e}")
        if 'driver' in locals():
            driver.quit()
        return None, None, None, None, None
# Streamlit UI
st.subheader("Google Maps Reviews Sentiment Analysis", divider="orange")

if 'url_count' not in st.session_state:
    st.session_state['url_count'] = 0
max_attempts = 5

def update_url_count():
    st.session_state['url_count'] += 1

def clear_question():
    st.session_state["url"] = ""

url = st.text_input("Enter Google Maps Reviews URL:", key="url")
st.button("Clear question", on_click=clear_question)
if st.button("Sentiment Analysis"):
    if st.session_state['url_count'] < max_attempts:
        if url:
            with st.spinner("Wait for it...", show_time=True):
                sentiment_df, df, fig1, fig2, fig3 = scrape_google_reviews(url)
                if sentiment_df is not None:
                    st.success("Reviews scraped successfully!")
                    df1 = df[['review_text', 'Sentiment', 'rating', 'review_date']]
                    st.dataframe(df1)

                    tab1, tab2, tab3 = st.tabs(["Pie Chart", "Bar Chart", "Scatter Plot"])
                    if fig1 is not None:
                        with tab1:
                            st.plotly_chart(fig1)
                    if fig2 is not None:
                        with tab2:
                            st.plotly_chart(fig2)
                    if fig3 is not None:
                        with tab3:
                            st.plotly_chart(fig3)
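                    # Bundle the results table into a zip archive that the user can download.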
                    buf = io.BytesIO()
                    with zipfile.ZipFile(buf, "w") as myzip:
                        myzip.writestr("Summary of the results.csv", df1.to_csv(index=False))

                    with stylable_container(
                        key="download_button",
                        css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
                    ):
                        st.download_button(
                            label="Download zip file",
                            data=buf.getvalue(),
                            file_name="zip_file.zip",
                            mime="application/zip",
                        )
                else:
                    st.warning("Failed to scrape reviews.")
            update_url_count()
        else:
            st.warning("Please enter a URL.")
    else:
        st.warning(f"You have reached the maximum URL attempts ({max_attempts}).")

st.write(f"URL pasted {st.session_state['url_count']} times.")