import joblib
import streamlit as st
import json
import requests
from bs4 import BeautifulSoup
from datetime import date
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import pandas as pd
# load all the models and vectorizer (global vocabulary)
Seq_model = load_model("LSTM.h5") # Sequential
SVM_model = joblib.load("SVM_Linear_Kernel.joblib") # SVM
logistic_model = joblib.load("Logistic_Model.joblib") # Logistic
svm_model = joblib.load('svm_model.joblib') # calibration model: turns the SVM's decision scores into probabilities (see process_api)
vectorizer = joblib.load("vectorizer.joblib") # global vocabulary (used for Logistic, SVC)
tokenizer = joblib.load("tokenizer.joblib") # used for LSTM
# Decode label function
# {'business': 0, 'entertainment': 1, 'health': 2, 'politics': 3, 'sport': 4}
def decodedLabel(input_number):
print('receive label encoded', input_number)
categories = {
0: 'Business',
1: 'Entertainment',
2: 'Health',
3: 'Politics',
4: 'Sport'
}
result = categories.get(input_number) # Ex: Health
print('decoded result', result)
return result
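# Example: decodedLabel(2) -> 'Health'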
# Web Crawler function: fetch an article page and return the article body text
# (the meta/CSS selectors below match CNN-style article markup)
def crawURL(url):
    # Crawl the page and extract data
    try:
        print(f"Crawling page: {url}")
        # Fetch page content (inside the try block so request errors are also caught)
        page_response = requests.get(url)
        page_content = page_response.content
        # Parse page content with BeautifulSoup
        soup = BeautifulSoup(page_content, 'html.parser')
        # Extract page metadata (currently unused; only the article body below is returned)
author = soup.find("meta", {"name": "author"}).attrs['content'].strip()
date_published = soup.find("meta", {"property": "article:published_time"}).attrs['content'].strip()
article_section = soup.find("meta", {"name": "meta-section"}).attrs['content']
url = soup.find("meta", {"property": "og:url"}).attrs['content']
headline = soup.find("h1", {"data-editable": "headlineText"}).text.strip()
description = soup.find("meta", {"name": "description"}).attrs['content'].strip()
keywords = soup.find("meta", {"name": "keywords"}).attrs['content'].strip()
text = soup.find(itemprop="articleBody")
# Find all <p> tags with class "paragraph inline-placeholder"
paragraphs = text.find_all('p', class_="paragraph inline-placeholder")
# Initialize an empty list to store the text content of each paragraph
paragraph_texts = []
# Iterate over each <p> tag and extract its text content
for paragraph in paragraphs:
paragraph_texts.append(paragraph.text.strip())
        # Join the text of all paragraphs into a single string (space-separated so words don't run together)
        full_text = ' '.join(paragraph_texts)
return full_text
except Exception as e:
print(f"Failed to crawl page: {url}, Error: {str(e)}")
return None
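# Example usage with a hypothetical CNN article URL (any page whose markup matches the selectors above):
#   article_text = crawURL("https://edition.cnn.com/2024/05/01/sport/example-article/index.html")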
# Predict for text category by Models
def process_api(text):
    # Vectorize the text for the classic models (Logistic Regression, SVM)
    processed_text = vectorizer.transform([text])
    # Tokenize and pad the text for the LSTM model
    sequence = tokenizer.texts_to_sequences([text])
    padded_sequence = pad_sequences(sequence, maxlen=1000, padding='post')
# Get the predicted result from models
Logistic_Predicted = logistic_model.predict(processed_text).tolist() # Logistic Model
SVM_Predicted = SVM_model.predict(processed_text).tolist() # SVC Model
Seq_Predicted = Seq_model.predict(padded_sequence)
predicted_label_index = np.argmax(Seq_Predicted)
    # ----------- Probabilities -----------
    Logistic_Predicted_proba = logistic_model.predict_proba(processed_text)
    # The linear-kernel SVM has no predict_proba, so its decision scores are passed
    # through the calibration model (svm_model) to obtain class probabilities
    svm_decision_scores = SVM_model.decision_function(processed_text)
    svm_probs = svm_model.predict_proba(svm_decision_scores)
    # ----------- Debug Logs -----------
    # decodedLabel() prints both the encoded input and the decoded label for each model
    logistic_debug = decodedLabel(int(Logistic_Predicted[0]))
    svc_debug = decodedLabel(int(SVM_Predicted[0]))
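    # Probabilities below are reported as truncated whole percentages (e.g. 0.8765 -> "87%")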
return {
'predicted_label_logistic': decodedLabel(int(Logistic_Predicted[0])),
'probability_logistic': f"{int(float(np.max(Logistic_Predicted_proba))*10000//100)}%",
'predicted_label_svm': decodedLabel(int(SVM_Predicted[0])),
'probability_svm': f"{int(float(np.max(svm_probs))*10000//100)}%",
'predicted_label_lstm': int(predicted_label_index),
'probability_lstm': f"{int(float(np.max(Seq_Predicted))*10000//100)}%",
'Article_Content': text
}
# ------------ Main Section ------------
# Init web crawling, process article content by Models and return result as JSON
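# A minimal sketch of the UI flow described by the comment above (crawl a URL, classify it,
# show the result as JSON); the widget labels and the exact JSON layout are assumptions,
# not the original code.
st.title("Text Classification")
url_input = st.text_input("Paste an article URL to classify")  # hypothetical widget label
if st.button("Predict"):
    article_text = crawURL(url_input)
    if article_text:
        result = process_api(article_text)
        st.json({
            "Logistic Regression": {
                "label": result["predicted_label_logistic"],
                "probability": result["probability_logistic"],
            },
            "SVM": {
                "label": result["predicted_label_svm"],
                "probability": result["probability_svm"],
            },
            "LSTM": {
                "label": decodedLabel(result["predicted_label_lstm"]),
                "probability": result["probability_lstm"],
            },
        })
    else:
        st.error("Could not crawl the article content from that URL.")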
st.divider() # 👈 Draws a horizontal rule
# Category labels and corresponding counts
categories = ["Sport", "Health", "Entertainment", "Politics", "Business"]
counts = [5638, 4547, 2658, 2461, 1362]
# Optional: Add a chart title
st.title("Training Data Category Distribution")
# Optional: Display additional information
st.write("Here's a breakdown of the number of articles in each category:")
for category, count in zip(categories, counts):
st.write(f"- {category}: {count}")
# Create the bar chart (wrap the counts in a DataFrame so the categories label the x-axis)
st.bar_chart(pd.DataFrame({"Articles": counts}, index=categories))
st.divider() # 👈 Draws a horizontal rule
# ------------ Copyright Section ------------
# Get the current year
current_year = date.today().year
# Format the copyright statement with dynamic year
copyright_text = f"Copyright © {current_year}"
st.title(copyright_text)
author_names = ["Trần Thanh Phước (Mentor)", "Lương Ngọc Phương (Member)", "Trịnh Cẩm Minh (Member)"]
st.write("Meet the minds behind the work!")
for author in author_names:
    if author == "Trịnh Cẩm Minh (Member)":
        st.markdown("- [Trịnh Cẩm Minh (Member)](https://minhct.netlify.app/)")
    else:
        st.markdown(f"- {author}")