# SCP-RECURSAL/tools/crawl jokes.py
import requests
from bs4 import BeautifulSoup
import json


def crawl_scp_series(url, num):
    # 'num' is passed by the caller but is not used inside this function.
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the div with id 'page-content'
    content_div = soup.find('div', id='page-content')

    # Extract all the links within this div
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]

    # Initialize a list to store the extracted texts
    stories = []

    for link in links:
        # Ensure the link is absolute
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"

        # Fetch each link
        try:
            story_response = requests.get(link)
            story_soup = BeautifulSoup(story_response.content, 'html.parser')

            # Extract the text from the div with id 'page-content'
            page_content = story_soup.find('div', id='page-content')
            if page_content:
                # Remove the right-aligned header div (ratings/credits) if it exists
                first_div = page_content.find('div', style="text-align: right;")
                if first_div:
                    first_div.decompose()

                # Remove the div with class 'licensebox' if it exists
                licensebox_div = page_content.find('div', class_='licensebox')
                if licensebox_div:
                    licensebox_div.decompose()

                print("Found page-content div")

                story_text = page_content.get_text().strip()
                stories.append(story_text)

                # Once 10 stories have been collected, append them to the JSONL file
                if len(stories) == 10:
                    with open("scp_jokes.jsonl", 'a', encoding='utf-8') as file:
                        for story in stories:
                            json_record = json.dumps({'text': story})
                            file.write(json_record + '\n')
                    # Reset the stories list for the next batch
                    stories = []
            else:
                print(f"Could not find page-content div for {link}")
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Write out any remaining stories that did not fill a full batch of 10
    if stories:
        with open("scp_jokes.jsonl", 'a', encoding='utf-8') as file:
            for story in stories:
                json_record = json.dumps({'text': story})
                file.write(json_record + '\n')

# URL of the joke SCPs hub page
urls = ['https://scp-wiki.wikidot.com/joke-scps']
num = 1

# Start crawling
for url in urls:
    crawl_scp_series(url, num)
    print(url)
    num += 1
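
# Optional sanity check: a minimal sketch, not part of the original crawler.
# After a run, it re-reads "scp_jokes.jsonl" (the file written above) and
# reports how many records were collected; everything below is illustrative.
try:
    with open("scp_jokes.jsonl", "r", encoding="utf-8") as f:
        records = [json.loads(line) for line in f if line.strip()]
    lengths = [len(r["text"]) for r in records]
    avg_len = sum(lengths) // max(len(lengths), 1)
    print(f"Collected {len(records)} records (average length: {avg_len} chars)")
except FileNotFoundError:
    print("scp_jokes.jsonl not found; run the crawler first.")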