|
import requests |
|
from bs4 import BeautifulSoup |
|
import json |
|
|
|
def _scrape_tale_text(session, tale_url):
    """Fetch one tale page and return its cleaned text, or None on failure.

    Strips the right-aligned header div (presumably rating/attribution —
    TODO confirm) and the license footer before extracting text.
    """
    try:
        # timeout prevents a single dead connection from hanging the whole run
        response = session.get(tale_url, timeout=30)
        response.raise_for_status()
    except requests.RequestException as exc:
        print(f"Failed to fetch {tale_url}: {exc}")
        return None

    tale_soup = BeautifulSoup(response.content, 'html.parser')
    page_content = tale_soup.find('div', id='page-content')
    if page_content is None:
        print(f"Could not find page-content div for {tale_url}")
        return None

    first_div = page_content.find('div', style="text-align: right;")
    if first_div:
        first_div.decompose()

    licensebox_div = page_content.find('div', class_='licensebox')
    if licensebox_div:
        licensebox_div.decompose()

    return page_content.get_text().strip()


def scrape_tales_by_year(start_year, end_year, output_path='scp_tales.jsonl'):
    """Scrape SCP-wiki tale texts for the given year range into a JSONL file.

    Walks each yearly "tales-by-date" index page, follows the first link in
    each table row to the tale page, extracts its cleaned text, and writes one
    {"text": ...} JSON record per line to *output_path*.

    Args:
        start_year: First year to scrape (inclusive).
        end_year: Last year to scrape (inclusive).
        output_path: Destination JSONL file (default 'scp_tales.jsonl',
            matching the original hard-coded name).
    """
    base_url = "https://scp-wiki.wikidot.com/tales-by-date-"
    all_tales = []

    # One Session reuses TCP connections across the many sequential requests.
    with requests.Session() as session:
        for year in range(start_year, end_year + 1):
            url = f"{base_url}{year}"
            try:
                response = session.get(url, timeout=30)
                response.raise_for_status()
            except requests.RequestException as exc:
                # Skip a bad index year rather than aborting the whole scrape.
                print(f"Failed to fetch index page {url}: {exc}")
                continue

            soup = BeautifulSoup(response.content, 'html.parser')
            for table in soup.find_all('table', class_='wiki-content-table'):
                for row in table.find_all('tr'):
                    cells = row.find_all('td')
                    if not cells:
                        continue  # header rows have <th>, not <td>
                    a_tag = cells[0].find('a', href=True)
                    if not a_tag:
                        continue

                    tale_url = f"https://scp-wiki.wikidot.com{a_tag['href']}"
                    tale_text = _scrape_tale_text(session, tale_url)
                    if tale_text is None:
                        continue

                    all_tales.append({'text': tale_text})

                    # Progress indicator: first non-blank line of the tale.
                    first_line = next((line for line in tale_text.splitlines() if line.strip()), "")
                    print(first_line)

    # Explicit UTF-8 and ensure_ascii=False keep non-ASCII tale text readable.
    with open(output_path, 'w', encoding='utf-8') as file:
        for tale in all_tales:
            file.write(json.dumps(tale, ensure_ascii=False) + '\n')
|
|
|
if __name__ == "__main__":
    # Guarded so importing this module doesn't kick off a multi-hour scrape.
    scrape_tales_by_year(2008, 2022)
|
|