import json

import requests
from bs4 import BeautifulSoup


def crawl_scp_series(url, num):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the div with id 'page-content' on the series index page
    content_div = soup.find('div', id='page-content')

    # Extract all the links within this div
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]

    # Collected story texts, written out in batches of 10
    stories = []

    for link in links:
        # Ensure the link is absolute
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"

        # Fetch each linked article
        try:
            story_response = requests.get(link)
            story_soup = BeautifulSoup(story_response.content, 'html.parser')

            # Extract the text from the div with id 'page-content'
            page_content = story_soup.find('div', id='page-content')
            if page_content:
                # Remove the right-aligned header div if present
                first_div = page_content.find('div', style="text-align: right;")
                if first_div:
                    first_div.decompose()

                # Remove the div with class 'licensebox' if it exists
                licensebox_div = page_content.find('div', class_='licensebox')
                if licensebox_div:
                    licensebox_div.decompose()

                print("Found page-content div")

                story_text = page_content.get_text().strip()
                stories.append(story_text)

                # Once 10 stories have been collected, append them to a JSONL file
                if len(stories) == 10:
                    with open(f"scp_stories{num}.jsonl", 'a') as file:
                        for story in stories:
                            json_record = json.dumps({'text': story})
                            file.write(json_record + '\n')
                    # Reset the stories list for the next batch
                    stories = []
            else:
                print(f"Could not find page-content div for {link}")
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Flush any leftover stories (fewer than 10) so the final batch is not dropped
    if stories:
        with open(f"scp_stories{num}.jsonl", 'a') as file:
            for story in stories:
                file.write(json.dumps({'text': story}) + '\n')


# URLs of the SCP series index pages
urls = [
    'https://scp-wiki.wikidot.com/scp-series-1',
    'https://scp-wiki.wikidot.com/scp-series-2',
    'https://scp-wiki.wikidot.com/scp-series-3',
    'https://scp-wiki.wikidot.com/scp-series-4',
    'https://scp-wiki.wikidot.com/scp-series-5',
    'https://scp-wiki.wikidot.com/scp-series-6',
    'https://scp-wiki.wikidot.com/scp-series-7',
    'https://scp-wiki.wikidot.com/scp-series-8',
]

# Start crawling, writing each series to its own numbered output file
num = 1
for url in urls:
    crawl_scp_series(url, num)
    print(url)
    num += 1