import joblib
import streamlit as st
import json
import requests
from bs4 import BeautifulSoup
# load all the models and vectorizer (global vocabulary)
# Seq_model = load_model("LSTM.h5") # Sequential
SVM_model = joblib.load("SVM_Linear_Kernel.joblib") # SVM
logistic_model = joblib.load("Logistic_Model.joblib") # Logistic
vectorizer = joblib.load("vectorizer.joblib") # global vocabulary (used for Logistic, SVC)
# tokenizer = joblib.load("tokenizer.joblib") # used for LSTM
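# NOTE (illustrative sketch, not part of this app): the .joblib artifacts above are
# assumed to have been produced offline with something like the commented code below,
# i.e. a TF-IDF vectorizer plus scikit-learn classifiers dumped via joblib. The exact
# training code, data, and hyperparameters are assumptions.
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.svm import SVC
#   import joblib
#
#   vectorizer = TfidfVectorizer()
#   X_train = vectorizer.fit_transform(train_texts)   # train_texts: list of article strings
#   logistic_model = LogisticRegression().fit(X_train, train_labels)
#   SVM_model = SVC(kernel="linear").fit(X_train, train_labels)
#
#   joblib.dump(vectorizer, "vectorizer.joblib")
#   joblib.dump(logistic_model, "Logistic_Model.joblib")
#   joblib.dump(SVM_model, "SVM_Linear_Kernel.joblib")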
# Decode label function
# {'business': 0, 'entertainment': 1, 'health': 2, 'politics': 3, 'sport': 4}
def categorize(input_number):
    print('received encoded label:', input_number)
    categories = {
        0: 'Business',
        1: 'Entertainment',
        2: 'Health',
        3: 'Politics',
        4: 'Sport'
    }
    result = categories.get(input_number)  # e.g. 2 -> 'Health'
    print('decoded result:', result)
    return result
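# Example: categorize(2) returns 'Health'; unknown codes return None.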
# Web Crawler function: fetch a CNN article page and return its body text
def crawURL(url):
    try:
        print(f"Crawling page: {url}")
        # Fetch page content
        page_response = requests.get(url)
        page_content = page_response.content
        # Parse page content with BeautifulSoup
        soup = BeautifulSoup(page_content, 'html.parser')
        # Extract metadata from the page (not used by the caller yet, kept for future use)
        author = soup.find("meta", {"name": "author"}).attrs['content'].strip()
        date_published = soup.find("meta", {"property": "article:published_time"}).attrs['content'].strip()
        article_section = soup.find("meta", {"name": "meta-section"}).attrs['content']
        url = soup.find("meta", {"property": "og:url"}).attrs['content']
        headline = soup.find("h1", {"data-editable": "headlineText"}).text.strip()
        description = soup.find("meta", {"name": "description"}).attrs['content'].strip()
        keywords = soup.find("meta", {"name": "keywords"}).attrs['content'].strip()
        # Locate the article body
        text = soup.find(itemprop="articleBody")
        # Find all <p> tags with class "paragraph inline-placeholder"
        paragraphs = text.find_all('p', class_="paragraph inline-placeholder")
        # Extract the text content of each paragraph
        paragraph_texts = [paragraph.text.strip() for paragraph in paragraphs]
        # Join the text content of all paragraphs into a single string
        full_text = ' '.join(paragraph_texts)
        return full_text
    except Exception as e:
        print(f"Failed to crawl page: {url}, Error: {str(e)}")
        return None
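# Example (hypothetical URL; the selectors above are specific to CNN's page layout):
#   body = crawURL("https://edition.cnn.com/2024/01/01/business/example-article/index.html")
#   body is the concatenated paragraph text on success, or None on failure.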
# Predict the text category using the models
def process_api(text):
    # Vectorize the text data with the shared vocabulary
    processed_text = vectorizer.transform([text])
    # sequence = tokenizer.texts_to_sequences([text])
    # padded_sequence = pad_sequences(sequence, maxlen=1000, padding='post')
    # Get the predicted result from both models
    Logistic_Predicted = logistic_model.predict(processed_text).tolist()  # Logistic Model
    SVM_Predicted = SVM_model.predict(processed_text).tolist()  # SVC Model
    # Seq_Predicted = Seq_model.predict(padded_sequence)
    # predicted_label_index = np.argmax(Seq_Predicted)
    return {
        'Logistic_Predicted': categorize(int(Logistic_Predicted[0])),
        'SVM_Predicted': categorize(int(SVM_Predicted[0])),
        'Article_Content': text
    }
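# Example return value (illustrative):
#   {'Logistic_Predicted': 'Business', 'SVM_Predicted': 'Business', 'Article_Content': '<full article text>'}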
# Crawl the URL, run the models, and return the predicted categories
def categorize_url(url):
    try:
        article_content = crawURL(url)
        result = process_api(article_content)
        return result
    except Exception as error:
        return {"error_message": str(error)}
# Main App
url = st.text_input("Enter a CNN article URL here")
if url:
    result = categorize_url(url)
    article_content = result.get('Article_Content')
    st.text_area("Article Content", value=article_content, height=400)  # render the article content in a textarea
    st.json(result)
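# Launch locally with: streamlit run app.py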