MINHCT committed on
Commit 4a5caff
1 Parent(s): aee4bf3

Update app.py

Files changed (1)
  1. app.py +0 -104
app.py CHANGED
@@ -15,110 +15,6 @@ logistic_model = joblib.load("./Logistic_Model.joblib") # Logistic
 vectorizer = joblib.load('./vectorizer.joblib') # global vocabulary
 tokenizer = joblib.load('./tokenizer.joblib')
 
-def crawURL(url):
-    print(f"Crawling page: {url}")
-    # Fetch the page
-    response = requests.get(url)
-    # Parse the page HTML
-    soup = BeautifulSoup(response.content, 'html.parser')
-
-    # Find all anchor tags that are children of span tags with class 'sitemap-link'
-    # (collected here but never used below)
-    urls = [span.a['href'] for span in soup.find_all('span', class_='sitemap-link') if span.a]
-
-    # Crawl the page and extract data
-    try:
-        print(f"Crawling page: {url}")
-        # Fetch page content
-        page_response = requests.get(url)
-        page_content = page_response.content
-
-        # Parse page content with BeautifulSoup
-        soup = BeautifulSoup(page_content, 'html.parser')
-
-        # Extract the metadata we need from the page
-        author = soup.find("meta", {"name": "author"}).attrs['content'].strip()
-        date_published = soup.find("meta", {"property": "article:published_time"}).attrs['content'].strip()
-        article_section = soup.find("meta", {"name": "meta-section"}).attrs['content']
-        url = soup.find("meta", {"property": "og:url"}).attrs['content']
-        headline = soup.find("h1", {"data-editable": "headlineText"}).text.strip()
-        description = soup.find("meta", {"name": "description"}).attrs['content'].strip()
-        keywords = soup.find("meta", {"name": "keywords"}).attrs['content'].strip()
-        text = soup.find(itemprop="articleBody")
-        # Find all <p> tags with class "paragraph inline-placeholder"
-        paragraphs = text.find_all('p', class_="paragraph inline-placeholder")
-
-        # Collect the text content of each paragraph
-        paragraph_texts = []
-        for paragraph in paragraphs:
-            paragraph_texts.append(paragraph.text.strip())
-
-        # Join the paragraph texts into a single string
-        full_text = ' '.join(paragraph_texts)
-        return full_text
-
-    except Exception as e:
-        print(f"Failed to crawl page: {url}, Error: {str(e)}")
-        return None
-
-
-def process_api(text):
-    # Vectorize the text data for the scikit-learn models
-    processed_text = vectorizer.transform([text])
-
-    # Tokenize and pad the text for the LSTM model
-    sequence = tokenizer.texts_to_sequences([text])
-    padded_sequence = pad_sequences(sequence, maxlen=1000, padding='post')
-    # Get the predicted result from each model
-    Seq_Predicted = Seq_model.predict(padded_sequence)
-    SVM_Predicted = SVM_model.predict(processed_text).tolist()
-    Logistic_Predicted = logistic_model.predict(processed_text).tolist()
-
-    predicted_label_index = np.argmax(Seq_Predicted)
-    return {
-        'Article_Content': text,
-        'LSTM': int(predicted_label_index),
-        'SVM_Predicted': int(SVM_Predicted[0]),
-        'Logistic_Predicted': int(Logistic_Predicted[0])
-    }
-
-
-# Use the models to categorize a crawled article: Category route
-@app.route('/api/categorize', methods=['POST'])
-def categorize():
-    try:
-        data = request.get_json()  # Get JSON data from the request body
-        text = data['text']  # Get the value of the 'text' key (read but unused below)
-        url = data['url']  # Get the article URL from the request body
-
-        article_content = crawURL(url)
-        result = process_api(article_content)
-        return jsonify(result), 200
-    except Exception:
-        return jsonify("No text or URL found in the request body"), 400
-
-
-# Return the blogs_from_cnn list
-@app.route('/api/blogs', methods=['GET'])
-@cross_origin()
-def blog_list():
-    # Path to the uploaded JSON file: [GET] API Blogs
-    json_file_path = 'C:/Users/LENOVO/Downloads/class/Get_Data_Minimize.json'
-    # Read and parse the JSON data directly
-    with open(json_file_path, 'r', encoding="utf8") as f:
-        blogs_from_cnn = json.load(f)
-
-    # Run each blog's article text through the models and attach the predictions
-    for blog in blogs_from_cnn:
-        result = process_api(blog['Article text'])
-        blog.update(result)
-        print(blog)
-    return jsonify(blogs_from_cnn), 200
-
-url = st.text_input("Enter a CNN article URL here")
-
 # Test
 x = st.slider('Select a value')
 st.write(x, 'squared is', x * x)
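
Note on the removed crawURL: each metadata lookup chains .attrs['content'] directly off soup.find(...), so one missing tag raises AttributeError and the broad except discards the whole page. A minimal None-safe sketch of that lookup; the helper name find_meta is hypothetical, not part of this app:

from bs4 import BeautifulSoup

def find_meta(soup, attr, name):
    # Return a meta tag's content, or None when the tag is absent,
    # instead of raising AttributeError like the chained lookup did.
    tag = soup.find("meta", {attr: name})
    return tag.attrs.get("content", "").strip() if tag else None

html = '<meta name="author" content=" Jane Doe ">'
soup = BeautifulSoup(html, "html.parser")
print(find_meta(soup, "name", "author"))       # Jane Doe
print(find_meta(soup, "name", "missing-key"))  # None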
 
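The removed process_api ran each article through two parallel pipelines: the loaded vectorizer for the SVM and logistic models, and the Keras tokenizer plus pad_sequences for the LSTM. A toy sketch of the padding step; the app loaded a fitted tokenizer from ./tokenizer.joblib and used maxlen=1000, while maxlen=10 here only keeps the output readable:

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Toy stand-in for the fitted tokenizer the app loads from ./tokenizer.joblib.
tokenizer = Tokenizer(num_words=1000)
tokenizer.fit_on_texts(["breaking news from the newsroom"])

sequence = tokenizer.texts_to_sequences(["breaking news"])
# Pad/truncate to a fixed length so the LSTM sees a uniform input shape;
# padding='post' appends zeros after the tokens, as in process_api.
padded = pad_sequences(sequence, maxlen=10, padding='post')
print(padded)  # [[1 2 0 0 0 0 0 0 0 0]]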
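For reference, a client call against the removed /api/categorize route looked roughly like this. The base URL is an assumption (Flask's development server defaults to http://127.0.0.1:5000) and the article URL is a placeholder; the handler read a 'text' key as well, even though only 'url' was used:

import requests

resp = requests.post(
    "http://127.0.0.1:5000/api/categorize",       # assumed host/port
    json={
        "text": "",                                # read by the handler but unused
        "url": "https://edition.cnn.com/example",  # placeholder CNN article URL
    },
)
print(resp.status_code)  # 200 on success, 400 on a bad body or failed crawl
print(resp.json())       # Article_Content, LSTM, SVM_Predicted, Logistic_Predicted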