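# Crawler for the SCP Wiki joke-SCP index: fetches every page linked from the
# index, strips page furniture (rating div, license box), and appends the
# article text to a JSONL file in batches.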
import json

import requests
from bs4 import BeautifulSoup


def crawl_scp_series(url):
    response = requests.get(url)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the div with id 'page-content'; bail out early if the index page
    # does not have the expected structure
    content_div = soup.find('div', id='page-content')
    if content_div is None:
        print(f"Could not find page-content div for {url}")
        return

    # Extract all the links within this div
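    # Note: this collects every non-empty link in the index, which may include
    # a few navigation links alongside the story pages.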
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]

    # Initialize a list to store the extracted texts
    stories = []

    for link in links:
        # Ensure the link is absolute
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"

        # Fetch each link
        try:
            story_response = requests.get(link)
            story_soup = BeautifulSoup(story_response.content, 'html.parser')

            # Extract the text from the div with id 'page-content'
            page_content = story_soup.find('div', id='page-content')
            if page_content is None:
                print(f"Could not find page-content div for {link}")
                continue

            # Remove the right-aligned div at the top of the page if it exists
            first_div = page_content.find('div', style="text-align: right;")
            if first_div:
                first_div.decompose()

            # Remove the div with class 'licensebox' if it exists
            licensebox_div = page_content.find('div', class_='licensebox')
            if licensebox_div:
                licensebox_div.decompose()

            story_text = page_content.get_text().strip()
            stories.append(story_text)

            # Write each full batch of 10 stories to disk, then start a new batch
            if len(stories) == 10:
                write_stories(stories)
                stories = []
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Flush any stories left over from an incomplete final batch
    if stories:
        write_stories(stories)


def write_stories(stories):
    # Append one JSON record per story to the JSONL output file
    with open('scp_jokes.jsonl', 'a', encoding='utf-8') as file:
        for story in stories:
            file.write(json.dumps({'text': story}) + '\n')


if __name__ == '__main__':
    # URL of the joke-SCP index page
    urls = ['https://scp-wiki.wikidot.com/joke-scps']

    # Start crawling
    for url in urls:
        crawl_scp_series(url)
        print(url)