Init App.py
App.py
ADDED
@@ -0,0 +1,124 @@
import joblib # Load joblib files (saved scikit-learn models and vectorizer)
import json # Load JSON files
import requests # Fetch web pages over HTTP
import numpy as np # Scientific computing in Python
import streamlit as st # Streamlit front end
from bs4 import BeautifulSoup # Parse HTML
from sklearn.feature_extraction.text import CountVectorizer # Convert text to BOW format
from flask import Flask, request, jsonify # Flask server
from flask_cors import cross_origin # Allow cross-origin requests on the API routes
from tensorflow.keras.preprocessing.text import Tokenizer # Tokenize text documents into sequences of tokens (Seq model)
from tensorflow.keras.preprocessing.sequence import pad_sequences # Ensure all sequences in a dataset have the same length (Seq model)
from tensorflow.keras.models import load_model # Load a pre-trained Keras model saved in HDF5 format

app = Flask(__name__) # Flask app that exposes the API routes below

# Load all the models and the vectorizer (global vocabulary)
Seq_model = load_model('./LSTM.h5') # Sequential (LSTM)
SVM_Linear_model = joblib.load("./SVM_Linear_Model.joblib") # SVM
logistic_model = joblib.load("./Logistic_Model.joblib") # Logistic Regression
vectorizer = joblib.load('./vectorizer.joblib') # Global vocabulary
tokenizer = joblib.load('./tokenizer.joblib')

def crawURL(url):
    # Crawl a single article page and extract its body text
    try:
        print(f"Crawling page: {url}")
        # Fetch page content
        page_response = requests.get(url)
        page_content = page_response.content

        # Parse page content with BeautifulSoup
        soup = BeautifulSoup(page_content, 'html.parser')

        # Extract the data you need from the page
        author = soup.find("meta", {"name": "author"}).attrs['content'].strip()
        date_published = soup.find("meta", {"property": "article:published_time"}).attrs['content'].strip()
        article_section = soup.find("meta", {"name": "meta-section"}).attrs['content']
        url = soup.find("meta", {"property": "og:url"}).attrs['content']
        headline = soup.find("h1", {"data-editable": "headlineText"}).text.strip()
        description = soup.find("meta", {"name": "description"}).attrs['content'].strip()
        keywords = soup.find("meta", {"name": "keywords"}).attrs['content'].strip()
        text = soup.find(itemprop="articleBody")

        # Find all <p> tags with class "paragraph inline-placeholder"
        paragraphs = text.find_all('p', class_="paragraph inline-placeholder")

        # Collect the text content of each paragraph
        paragraph_texts = []
        for paragraph in paragraphs:
            paragraph_texts.append(paragraph.text.strip())

        # Join the text content of all paragraphs into a single string
        full_text = ''.join(paragraph_texts)
        return full_text

    except Exception as e:
        print(f"Failed to crawl page: {url}, Error: {str(e)}")
        return None

def process_api(text):
    # Vectorize the text data (BOW features for the SVM and Logistic models)
    processed_text = vectorizer.transform([text])

    # Tokenize and pad the text for the sequence (LSTM) model
    sequence = tokenizer.texts_to_sequences([text])
    padded_sequence = pad_sequences(sequence, maxlen=1000, padding='post')

    # Get the predicted result from each model
    Seq_Predicted = Seq_model.predict(padded_sequence)
    SVM_Predicted = SVM_Linear_model.predict(processed_text).tolist()
    Logistic_Predicted = logistic_model.predict(processed_text).tolist()

    predicted_label_index = np.argmax(Seq_Predicted)
    return {
        'Article_Content': text,
        'LSTM': int(predicted_label_index),
        'SVM_Predicted': int(SVM_Predicted[0]),
        'Logistic_Predicted': int(Logistic_Predicted[0])
    }

# Use the models to categorize the article behind a URL (Category route)
@app.route('/api/categorize', methods=['POST'])
def categorize():
    try:
        data = request.get_json() # Get JSON data from the request body
        text = data['text'] # Get the value of the 'text' key
        url = data['url'] # Get the URL from the request body

        article_content = crawURL(url)
        result = process_api(article_content)
        return jsonify(result), 200
    except Exception:
        return jsonify("No text found in the request body"), 400

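# Example request for the route above (a sketch; the field values are placeholders):
#   POST /api/categorize
#   Content-Type: application/json
#   {"text": "raw article text", "url": "https://edition.cnn.com/..."}
# Only 'url' drives the prediction: the article is re-crawled and its body text is classified.
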
# Return the blogs_from_cnn list with predictions attached
@app.route('/api/blogs', methods=['GET'])
@cross_origin()
def blog_list():
    # Specify the path to the uploaded JSON file: [GET] API Blogs
    json_file_path = 'C:/Users/LENOVO/Downloads/class/Get_Data_Minimize.json'
    # Read and parse the JSON data directly
    with open(json_file_path, 'r', encoding="utf8") as f:
        blogs_from_cnn = json.load(f)

    # Note: printing the loaded data shows Python's single-quoted dict representation;
    # jsonify still returns valid JSON.
    # Run every article through the models and attach the predictions
    for blog in blogs_from_cnn:
        result = process_api(blog['Article text'])
        blog.update(result)
        print(blog)
    return jsonify(blogs_from_cnn), 200

# Streamlit front end: categorize a CNN article from its URL
url = st.text_input("Enter a CNN article URL here")

if url:
    article_content = crawURL(url)
    result = process_api(article_content)
    st.json(result)
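
# Example usage (a sketch, not part of the original app, shown commented out so it does
# not run inside the Streamlit app): exercise the crawl + predict pipeline directly,
# assuming the model files above load successfully. The URL is a hypothetical placeholder.
# sample_url = "https://edition.cnn.com/2024/01/01/example-article/index.html"
# content = crawURL(sample_url)
# if content:
#     print(process_api(content))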