Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: other
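
"""
Scrape XKCD comics and their explanations from explainxkcd.com and xkcd.com.

For every comic listed on explainxkcd.com, this script collects the comic id,
title, image hover text, image URL, transcript and explanation, and writes the
result to `dataset.jsonl` next to this file.
"""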
import aiohttp
import asyncio
import re

import pandas as pd

from pathlib import Path
from aiolimiter import AsyncLimiter
from typing import Dict, List
from bs4 import BeautifulSoup
from bs4.element import Tag

LIST_COMICS_500_URL = (
    "https://www.explainxkcd.com/wiki/index.php/List_of_all_comics_(1-500)"
)
LIST_COMICS_FULL_URL = (
    "https://www.explainxkcd.com/wiki/index.php/List_of_all_comics_(full)"
)


def walk_tag(initial_tag: Tag, end_tag_name: str) -> str:
    """
    Walk the HTML tree and aggregate all text between an initial tag
    and the next sibling tag whose name is `end_tag_name`.

    Parameters
    ----------
    initial_tag: Tag
        Tag to start walking from.
    end_tag_name: str
        Name of the sibling tag at which to stop.

    Returns
    -------
    aggregated_text: str
        Text of all <p> and <dl> siblings, joined with newlines.
    """
    result = []
    current_tag = initial_tag

    # Walk the sibling elements, collecting paragraph and definition-list text
    while current_tag is not None:
        if current_tag.name in ["p", "dl"]:
            result.append(current_tag.get_text(separator=" ", strip=True))
        elif current_tag.name == end_tag_name:
            # We reached the end tag, stop walking
            break
        current_tag = current_tag.next_sibling
    return "\n".join(result)

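# Note: `walk_tag` is used below on the parents of the "Explanation" and
# "Transcript" heading spans, e.g.
#   walk_tag(soup.find("span", {"id": "Explanation"}).parent, "span")
# which collects all <p>/<dl> text until the next section heading's span.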

async def parse_url_html(
    session: aiohttp.ClientSession, url: str, throttler: AsyncLimiter
) -> BeautifulSoup:
    """
    Fetch and parse the HTML content of a given URL.
    The request is sent asynchronously through the provided rate limiter.
    If the request fails, it is retried up to 5 times; if every attempt
    fails, None is returned.

    Parameters
    ----------
    session: aiohttp.ClientSession
    url: str
    throttler: AsyncLimiter

    Returns
    -------
    BeautifulSoup
        The parsed page, or None if all retries failed.
    """
    for _ in range(5):
        try:
            # The limiter spaces out requests to avoid rate-limiting issues
            async with throttler:
                async with session.get(url, raise_for_status=True) as resp:
                    html = await resp.text()
            return BeautifulSoup(html, "html.parser")
        # Request failed, try again
        except aiohttp.ClientError:
            continue
    # Every retry failed
    return None


async def scrap_comic(
    session: aiohttp.ClientSession, explained_url: str, throttler: AsyncLimiter
) -> Dict[str, str]:
    """
    Try to scrape all information for a given XKCD comic using its `explainxkcd.com` URL.

    Parameters
    ----------
    session: aiohttp.ClientSession
    explained_url: str
    throttler: AsyncLimiter

    Returns
    -------
    Dict[str, str]
    """
    soup = await parse_url_html(session, explained_url, throttler)

    # Parse the comic id and title from the "<id>: <title>" page heading
    title_splits = soup.find("h1").text.split(":")
    if len(title_splits) > 1:
        id = title_splits[0]
        # Re-join with ":" so that titles containing a colon are preserved
        title = ":".join(title_splits[1:]).strip()
    else:
        id = None
        title = "".join(title_splits).strip()

    # Parse explanation
    explanation_soup = soup.find("span", {"id": "Explanation"})
    try:
        explanation = walk_tag(explanation_soup.parent, "span")
    except AttributeError:
        # No "Explanation" section on this page
        explanation = None

    # Parse transcript
    transcript_soup = soup.find("span", {"id": "Transcript"})
    try:
        transcript = walk_tag(transcript_soup.parent, "span")
    except AttributeError:
        # No "Transcript" section on this page
        transcript = None

    xkcd_url = f"https://www.xkcd.com/{id}"
    xkcd_soup = await parse_url_html(session, xkcd_url, throttler)

    # Parse image title (the hover text); fall back to the alt text
    try:
        image = xkcd_soup.find("div", {"id": "comic"}).img
        if image.has_attr("title"):
            image_title = image["title"]
        else:
            image_title = image["alt"]
    except (AttributeError, KeyError):
        image_title = None

    # Parse image url (the permanent imgs.xkcd.com link appears as text on the page)
    try:
        image_url = xkcd_soup.find(text=re.compile("https://imgs.xkcd.com"))
    except AttributeError:
        image_url = None

    return dict(
        id=id,
        title=title,
        image_title=image_title,
        url=xkcd_url,
        image_url=image_url,
        explained_url=explained_url,
        transcript=transcript,
        explanation=explanation,
    )

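# Illustration of the record shape returned above (not executed; values depend
# on the live pages, comic 353 "Python" is used only as a plausible example):
#   {"id": "353", "title": "Python", "image_title": "...",
#    "url": "https://www.xkcd.com/353", "image_url": "...",
#    "explained_url": "...", "transcript": "...", "explanation": "..."}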

async def scrap_comic_urls(
    session: aiohttp.ClientSession, comic_list_url: str
) -> List[str]:
    """
    Scrape all XKCD comic explanation-page URLs from an `explainxkcd.com` list page.

    Parameters
    ----------
    session: aiohttp.ClientSession
    comic_list_url: str

    Returns
    -------
    urls: List[str]
    """
    async with session.get(comic_list_url) as resp:
        html = await resp.text()
    soup = BeautifulSoup(html, "html.parser")

    # Hack to easily find comics: each comic entry carries a span with class
    # "create" whose parent anchor links to the comic's explanation page
    create_spans = soup.find_all("span", {"class": "create"})
    return [
        "https://www.explainxkcd.com" + span.parent.a["href"] for span in create_spans
    ]


async def main():
    """
    Scrape the XKCD dataset and write it to `dataset.jsonl`.
    """
    # Throttle to 10 requests per second
    throttler = AsyncLimiter(max_rate=10, time_period=1)
    async with aiohttp.ClientSession() as session:
        # Get all comic urls
        comic_urls = await scrap_comic_urls(
            session, LIST_COMICS_500_URL
        ) + await scrap_comic_urls(session, LIST_COMICS_FULL_URL)

        # Scrape all comics asynchronously
        data = await asyncio.gather(
            *[scrap_comic(session, url, throttler) for url in comic_urls]
        )

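    # Keep only comics with a numeric id, sort by id, and export as JSON lines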
    df = (
        pd.DataFrame.from_records(data)
        .dropna(subset=["id"])
        .astype({"id": "int32"})
        .sort_values("id")
    )
    df.to_json(Path(__file__).parent / "dataset.jsonl", orient="records", lines=True)


if __name__ == "__main__":
    asyncio.run(main())
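
# Usage sketch (assumption: the file name "scrape_xkcd.py" is illustrative and
# not given in the source):
#
#   pip install aiohttp aiolimiter beautifulsoup4 pandas
#   python scrape_xkcd.py
#
# The resulting dataset.jsonl is written next to this script.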