Biddls committed on
Commit 2d5db7c
1 Parent(s): 6950e55

Upload 2 files


The current version of the scraped file, plus the script used to generate it for future updates.

Files changed (3)
  1. .gitattributes +1 -0
  2. NewsWebScrape.txt +3 -0
  3. onionNewsWebScrape.py +183 -0
.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+NewsWebScrape.txt filter=lfs diff=lfs merge=lfs -text
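The added line tells Git to store NewsWebScrape.txt through Git LFS, so the repository itself keeps only the small pointer shown in the next section while the roughly 23 MB of scraped text lives in LFS storage. An attribute line like this is what `git lfs track "NewsWebScrape.txt"` would normally generate.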
NewsWebScrape.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09d95b206e5b07da5e3c51fb3df0b7c44138966477433c55f114565b7f57149c
+size 23450124
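For anyone consuming the dataset rather than regenerating it: the pointer above stands in for the actual text file, which (per the script below) stores one article per line as `headline #~# body`. A minimal, hypothetical loading sketch, assuming the file has already been fetched with `git lfs pull`:

# Hypothetical loader sketch; assumes the file keeps the "headline #~# body"
# line format written by onionNewsWebScrape.py below.
with open("NewsWebScrape.txt", encoding="utf-8") as f:
    articles = [line.rstrip("\n").split(" #~# ", 1) for line in f if " #~# " in line]

print(f"{len(articles)} articles loaded")
headline, body = articles[0]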
onionNewsWebScrape.py ADDED
@@ -0,0 +1,183 @@
+from bs4 import BeautifulSoup
+import requests
+from typing import Tuple, List
+from tqdm import tqdm
+import itertools
+from multiprocessing import Pool
+
+monthLinkClass = "sc-zpw6hx-0"
+articleLinkClass = "sc-1w8kdgf-0"
+newsHeadClass = "sc-1efpnfq-0"
+newsBodyClass = "sc-77igqf-0"
+videoClass = "lhhce6-0"
+
+url = "https://www.theonion.com"
+
+
+# Function to get all links from a page with _url of class _class
+def get_links(_url: str, _class: str) -> List[str]:
+    """
+    This function takes in a URL string and a class string and returns a list
+    of all links from a page with that URL and class.
+
+    Args:
+        _url (str): A string representing the URL of the page to scrape.
+        _class (str): A string representing the class of the div containing the links.
+
+    Returns:
+        List[str]: A list of strings representing the URLs of the links found.
+    """
+    # Make a request to the given URL
+    page = requests.get(_url)  # This might throw an exception if something goes wrong.
+
+    # Create a BeautifulSoup object to parse the HTML content of the page
+    soup = BeautifulSoup(page.text, 'html.parser')
+
+    # Find the div with the given class (using a lambda function to match partial matches)
+    try:
+        link_div = soup.find_all('div', attrs={'class': lambda e: e.startswith(_class) if e else False})[0]
+    except IndexError:
+        raise IndexError(f"Error: {_url}, with {_class}")
+
+    # Find all the links within the div and extract their URLs
+    links = link_div.find_all('a')
+    links = [link.get('href') for link in links]
+
+    # Return the list of URLs found
+    return links
+
+
+def extractText(_url: str, _done: bool = True) -> Tuple[str, str]:
+    """
+    This function takes in a URL string and a boolean flag (default=True) and returns a tuple
+    of two strings: the first is the text of the page's H1 heading with class 'newsHeadClass',
+    and the second is the text of the page's first paragraph with class 'newsBodyClass'.
+
+    If the function encounters an SSLError or ConnectionError, it will retry the request once
+    with the _done flag set to False (to prevent infinite recursion); if the retry also fails,
+    it will return a tuple of empty strings.
+
+    If the page does not have an H1 heading or first paragraph with the expected class, the
+    function will return a tuple of empty strings.
+
+    Args:
+        _url (str): A string representing the URL of the page to scrape.
+        _done (bool): A boolean flag indicating whether the function may still retry the
+            request once (default=True).
+
+    Returns:
+        Tuple[str, str]: A tuple of two strings representing the text of the H1 heading and
+            first paragraph with the expected classes (or empty strings if not found).
+    """
+    try:
+        # Make a request to the given URL
+        page = requests.get(_url)  # This might throw an exception if something goes wrong.
+    except (requests.exceptions.SSLError, requests.exceptions.ConnectionError):
+        # If the request fails due to an SSL error or connection error, and we haven't already
+        # retried the request, try again with the _done flag set to False.
+        if _done:
+            return extractText(_url, _done=False)
+        else:
+            # If we've already retried the request and it still failed, return empty strings.
+            return "", ""
+
+    # Create a BeautifulSoup object to parse the HTML content of the page
+    soup = BeautifulSoup(page.text, 'html.parser')
+
+    try:
+        # Find the H1 heading with the expected class
+        head = soup.find_all('h1', attrs={'class': lambda _e: _e.startswith(newsHeadClass) if _e else False})[0].text
+
+        # Find the first paragraph with the expected class
+        body = soup.find_all('p', attrs={'class': lambda _e: _e.startswith(newsBodyClass) if _e else False})[0].text
+
+        # If the H1 heading is the same as the body, assume we haven't found the expected elements
+        if head == body:
+            return "", ""
+        else:
+            # Return the text of the H1 heading and first paragraph
+            return head, body
+
+    except IndexError as e:
+        # If we couldn't find the expected elements, check if there is a video on the page
+        a = soup.find_all('div', attrs={'class': lambda _e: _e.startswith(videoClass) if _e else False})
+        if not a:
+            # If there is no video, return a tuple of empty strings
+            return "", ""
+        # If there is a video, print an error message and raise the IndexError
+        print(f"Error: {_url}")
+        raise e
+
+
+def batched_extractText(_urls: List[str], _p: Pool) -> List[Tuple[str, str]]:
+    """
+    This function takes in a list of URL strings and a multiprocessing Pool object, and returns
+    a list of (heading, body) tuples with the text extracted from each page.
+
+    The function uses the multiprocessing Pool object to parallelize the extraction of text from
+    the pages, and returns the results as a list of tuples.
+
+    Args:
+        _urls (List[str]): A list of strings representing the URLs of the pages to scrape.
+        _p (Pool): A multiprocessing Pool object used to parallelize the extraction of text.
+
+    Returns:
+        List[Tuple[str, str]]: A list of tuples containing the text of the H1 heading and first
+            paragraph for each page.
+    """
+    # Use the map_async method of the multiprocessing Pool object to parallelize the extraction
+    # of text from the pages.
+    results = _p.map_async(extractText, _urls).get()
+
+    # Return the results as a list of tuples.
+    return results
+
+
+def main() -> None:
+    """
+    Scrape news article titles and bodies from The Onion website and save them to a file.
+
+    Returns:
+        None
+
+    """
+    # Get the links to the monthly sitemaps from the main page, and print some information about them.
+    monthLinks = get_links(url + "/sitemap", monthLinkClass)
+    print(f"{len(monthLinks)} months have been found.")
+    print(f"Oldest is {monthLinks[-1].replace('/sitemap/', '')}")
+    print(f"and newest is {monthLinks[0].replace('/sitemap/', '')}")
+
+    # Construct the full URLs for the monthly sitemaps.
+    monthLinks = [url + link for link in monthLinks]
+
+    # Get the links to the individual articles from the monthly sitemaps, and print some information
+    # about them.
+    articleLinks = [get_links(monthLink, articleLinkClass) for monthLink in tqdm(monthLinks, desc="Months")]
+    articleLinks = list(itertools.chain(*articleLinks))
+    print(f"{len(articleLinks)} articles have been found.")
+
+    # Extract the text of the H1 heading and first paragraph for each article, using multiprocessing
+    # to speed up the process.
+    text = []
+    batch_size = 60
+    batch_counter = tqdm(total=len(articleLinks), desc="Articles")
+    with Pool(batch_size) as p:
+        for x in range(0, len(articleLinks), batch_size):
+            text += batched_extractText(articleLinks[x:x + batch_size], p)
+            batch_counter.update(batch_size)
+
+    # Filter out any articles that didn't have both a non-empty heading and a non-empty body text.
+    text = [x for x in text if x != ("", "")]
+
+    # Write the text of each article to a file (the "onion" directory must already exist).
+    with open("onion/NewsWebScrape.txt", mode="w", encoding="utf-8") as f:
+        for article in text:
+            if article:
+                f.write(f"{article[0]} #~# {article[1]}\n")
+
+    # Print some information about the number of articles found and written to file.
+    print(f"{len(articleLinks)} articles were found, and {len(text)} articles were written to file.")
+
+
+if __name__ == "__main__":
+    main()
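To regenerate the dataset, the script needs beautifulsoup4, requests, and tqdm installed, and an onion/ directory must exist next to it, since the output path is hard-coded. Below is a rough usage sketch of the individual helpers, assuming The Onion's sitemap markup still matches the class prefixes defined at the top of the file; the constants are site-specific and may go stale.

# Rough usage sketch; class-prefix constants are site-specific assumptions.
from onionNewsWebScrape import get_links, extractText, url, monthLinkClass, articleLinkClass

months = get_links(url + "/sitemap", monthLinkClass)      # relative '/sitemap/...' paths
articles = get_links(url + months[0], articleLinkClass)   # article URLs for the newest month
head, body = extractText(articles[0])                     # ("", "") when no article text is found
print(head)

Running python onionNewsWebScrape.py directly performs the full crawl with a 60-process pool, which is a heavy load on both the site and the local machine.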