maldv committed
Commit: ba3b4eb
Parent: 277fc7d

initial revision

Files changed (5):
  1. .gitignore +1 -0
  2. README.md +17 -1
  3. cyberpunk.parquet +3 -0
  4. epub-processing.ipynb +228 -0
  5. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ source
README.md CHANGED
@@ -1,3 +1,19 @@
  ---
- license: cc-by-4.0
+ language:
+ - en
+ pretty_name: "Cyberpunk"
+ tags:
+ - book-data
+ license: cc-by-nc-4.0
  ---
+ # Dataset - cyberpunk
+
+ - **Developed by:** maldv
+ - **License:** cc-by-nc-4.0
+ - **Methodology:** Formatting book data by paragraph for training
+
+ ## Description
+
+ Processing ebook data is much easier than dealing with the formatting of long-form book text.
+
+ These are the data artifacts from processing a series of influential early cyberpunk books that I was able to find in EPUB format. A Jupyter notebook demonstrating the methodology is enclosed.
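To make the dataset's shape concrete, here is a minimal sketch of loading the parquet with pandas. The column names come from the notebook below; the local path assumes the LFS-stored file has been pulled to the working directory.

```python
import pandas as pd

# Assumes cyberpunk.parquet has been fetched locally (it is stored via git-lfs).
df = pd.read_parquet("cyberpunk.parquet")

# Columns written by the notebook below:
# book_name, paragraph_ix, chapter_ix, chapter_title, text,
# word_count, cumsum_word_count
print(df.groupby("book_name")["word_count"].sum())  # rough size of each book
print(df[["chapter_ix", "chapter_title", "text"]].head())
```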
cyberpunk.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:104047ddf465327d1b21427d53e0ea7990abe649c9bf7d9c95e1feaf2cbd8b80
+ size 7220007
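The file above is a git-lfs v1 pointer: the actual parquet is addressed by its SHA-256 digest and byte size. A minimal sketch for verifying a locally fetched copy against the recorded values (the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Check a locally resolved copy against the pointer values above.
data = Path("cyberpunk.parquet").read_bytes()
assert len(data) == 7220007, f"unexpected size: {len(data)}"
digest = hashlib.sha256(data).hexdigest()
assert digest == "104047ddf465327d1b21427d53e0ea7990abe649c9bf7d9c95e1feaf2cbd8b80", digest
print("cyberpunk.parquet matches its LFS pointer")
```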
epub-processing.ipynb ADDED
@@ -0,0 +1,228 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from bs4 import BeautifulSoup, NavigableString, Tag\n",
+     "import ebooklib\n",
+     "from ebooklib import epub\n",
+     "import os\n",
+     "import re\n",
+     "from typing import Generator, List\n",
+     "\n",
+     "def parse_ebook_html(ebook_path: str, try_chapter: bool = False) -> Generator[tuple, None, None]:\n",
+     "    \"\"\"\n",
+     "    Parses the HTML content of an EPUB file, yielding only text content from each <p> block,\n",
+     "    while skipping specific elements with class 'calibre3' but considering valid text that follows.\n",
+     "\n",
+     "    Parameters:\n",
+     "    - ebook_path (str): The path to the EPUB file.\n",
+     "    - try_chapter (bool): If True, the first paragraph of each chapter will be used to determine the chapter title.\n",
+     "\n",
+     "    Returns:\n",
+     "    - text_generator (Generator[tuple, None, None]): A generator yielding text content.\n",
+     "    \"\"\"\n",
+     "    book = epub.read_epub(ebook_path)\n",
+     "    basename = os.path.basename(ebook_path)\n",
+     "    noext = os.path.splitext(basename)[0]\n",
+     "    chapter_idx = 0\n",
+     "    paragraph_idx = 0\n",
+     "    cumsum_word_count = 0\n",
+     "    for item in book.get_items_of_type(ebooklib.ITEM_DOCUMENT):\n",
+     "        content = item.get_content().decode('utf-8')\n",
+     "        results = list(html_tokenizer(content, try_chapter))\n",
+     "        if len(results) == 0:\n",
+     "            continue\n",
+     "        chapter_idx += 1\n",
+     "        for row in results:\n",
+     "            if len(row[1]) == 0:\n",
+     "                continue\n",
+     "            paragraph_idx += 1\n",
+     "            word_count = len(row[1].split())\n",
+     "            cumsum_word_count += word_count\n",
+     "            row = [noext, paragraph_idx, chapter_idx] + list(row) + [word_count, cumsum_word_count]\n",
+     "            yield tuple(row)\n",
+     "\n",
+     "def html_tokenizer(html_content: str, try_chapter: bool) -> Generator[tuple, None, None]:\n",
+     "    \"\"\"\n",
+     "    Generator function to tokenize HTML content, yielding text content from each <p> block.\n",
+     "\n",
+     "    Parameters:\n",
+     "    - html_content (str): The HTML content to be tokenized.\n",
+     "    - try_chapter (bool): If True, the first paragraph of each chapter will be used to determine the chapter title.\n",
+     "\n",
+     "    Yields:\n",
+     "    - text_generator (Generator[tuple, None, None]): A generator yielding text content.\n",
+     "    \"\"\"\n",
+     "    soup = BeautifulSoup(html_content, 'html.parser')\n",
+     "    fix_quote = re.compile(r'“|”|»|«')\n",
+     "    fix_threedot = re.compile(r'…')\n",
+     "    fix_bars = re.compile(r'\\|\\s*\\|')\n",
+     "\n",
+     "    def extract_and_yield_text(element, accumulated_texts: List[str]):\n",
+     "        if isinstance(element, NavigableString):\n",
+     "            accumulated_texts.append(str(element))\n",
+     "        elif isinstance(element, Tag):\n",
+     "            if element.name == 'a' and 'calibre3' in element.get('class', []):\n",
+     "                # Skip processing the <a class=\"calibre3\"> tag itself, but not its siblings\n",
+     "                return\n",
+     "            if element.name == 'span' and 'italic' in element.get('class', []):\n",
+     "                # Append italic text directly to the accumulated_texts list without yielding\n",
+     "                accumulated_texts.append(element.get_text())\n",
+     "            else:\n",
+     "                # Recursively process all children, including those following skipped elements\n",
+     "                for child in element.children:\n",
+     "                    extract_and_yield_text(child, accumulated_texts)\n",
+     "\n",
+     "    chapter = None\n",
+     "    for i, p_tag in enumerate(soup.find_all('p')):\n",
+     "        accumulated_texts = []\n",
+     "        # Skip paragraphs with class 'calibre14'; they hold metadata\n",
+     "        if 'calibre14' in p_tag.get('class', []):\n",
+     "            continue\n",
+     "        if i == 0 and try_chapter:\n",
+     "            # Instead of processing, this paragraph contains our chapter and title\n",
+     "            markers = []\n",
+     "            for span in p_tag.find_all('span', class_='bold'):\n",
+     "                markers.append(span.get_text())\n",
+     "\n",
+     "            if len(markers) >= 2:\n",
+     "                chapter = ' '.join(markers)\n",
+     "                continue\n",
+     "\n",
+     "        extract_and_yield_text(p_tag, accumulated_texts)\n",
+     "        # If our text is '| |', skip it\n",
+     "        if '| |' in ' '.join(accumulated_texts):\n",
+     "            continue\n",
+     "        text = ' '.join([text.strip() for text in accumulated_texts if text.strip()])\n",
+     "        text = text.replace('\\xa0', ' ')\n",
+     "        text = fix_quote.sub('\"', text)\n",
+     "        text = fix_threedot.sub('...', text)\n",
+     "        text = fix_bars.sub('', text)\n",
+     "        text = text.strip()\n",
+     "        if len(text) == 0:\n",
+     "            continue\n",
+     "        # A capital, a space, then another capital is likely a drop-cap chapter opening; remove the space\n",
+     "        elif len(text) > 2 and text[0].isupper() and text[1] == ' ' and text[2].isupper():\n",
+     "            text = text[0] + text[2:]\n",
+     "        yield chapter, text\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 22,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Processing cryptonomicon\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/mnt/biggy/ai/notebook/jupyterenv/lib/python3.10/site-packages/ebooklib/epub.py:1395: UserWarning: In the future version we will turn default option ignore_ncx to True.\n",
+       "  warnings.warn('In the future version we will turn default option ignore_ncx to True.')\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Clipping (1, 2)\n",
+       "Processing neuromancer\n",
+       "Clipping (1, 49)\n",
+       "Processing burningchrome\n",
+       "Processing snowcrash\n",
+       "Clipping (1, 80)\n",
+       "Clipping (4590, 4636)\n",
+       "Processing quicksilver\n",
+       "Processing monalisa\n",
+       "Processing theconfusion\n",
+       "Processing systemofworld\n",
+       "Clipping (261, 331)\n",
+       "Saved 41535 paragraphs to cyberpunk.parquet\n"
+      ]
+     }
+    ],
+    "source": [
+     "from glob import glob\n",
+     "import pandas as pd\n",
+     "\n",
+     "# Special rules per book.\n",
+     "# cryptonomicon requires try_chapter=True, and needs '| |' removed\n",
+     "special = {\n",
+     "    'cryptonomicon': {'try_chapter': True, 'clip': [(1, 2)], 'drop': [106]},\n",
+     "    'neuromancer': {'clip': [(1, 49)]},\n",
+     "    'burningchrome': {'try_chapter': True, 'drop': [1]},\n",
+     "    'snowcrash': {'try_chapter': True, 'clip': [(1, 80), (4590, 4636)]},\n",
+     "    'quicksilver': {},\n",
+     "    'monalisa': {},\n",
+     "    'theconfusion': {},\n",
+     "    'systemofworld': {'clip': [(261, 331)]},\n",
+     "}\n",
+     "\n",
+     "columns = ['book_name', 'paragraph_ix', 'chapter_ix', 'chapter_title', 'text', 'word_count', 'cumsum_word_count']\n",
+     "frames = []\n",
+     "for book in glob('source/*.epub'):\n",
+     "    name = os.path.splitext(os.path.basename(book))[0]\n",
+     "    print(f\"Processing {name}\")\n",
+     "    config = special.get(name, {})\n",
+     "    try_chapter = config.get('try_chapter', False)\n",
+     "    clips = config.get('clip', [])\n",
+     "    drops = config.get('drop', [])\n",
+     "\n",
+     "    lines = parse_ebook_html(book, try_chapter=try_chapter)\n",
+     "    new_frame = pd.DataFrame(lines, columns=columns)\n",
+     "    # Drop whole chapters by chapter_ix\n",
+     "    for drop in drops:\n",
+     "        new_frame = new_frame[new_frame['chapter_ix'] != drop]\n",
+     "    for clip in clips:\n",
+     "        # Remove the paragraph ids that are in the clip range, inclusive\n",
+     "        print(f\"Clipping {clip}\")\n",
+     "        idxr = range(clip[0], clip[1] + 1)\n",
+     "        new_frame = new_frame[~new_frame['paragraph_ix'].isin(idxr)]\n",
+     "\n",
+     "    frames.append(new_frame)\n",
+     "\n",
+     "all_books = pd.concat(frames, ignore_index=True)\n",
+     "all_books.to_parquet('cyberpunk.parquet')\n",
+     "print(f\"Saved {len(all_books)} paragraphs to cyberpunk.parquet\")"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "jupyterenv",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.12"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
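To apply the notebook's parser outside the batch loop, here is a minimal sketch, assuming the two cells above have been run and using a hypothetical `source/mybook.epub` as a placeholder:

```python
import pandas as pd

# Hypothetical single-book run of the notebook's parse_ebook_html generator.
rows = parse_ebook_html('source/mybook.epub', try_chapter=False)
df = pd.DataFrame(rows, columns=['book_name', 'paragraph_ix', 'chapter_ix',
                                 'chapter_title', 'text', 'word_count',
                                 'cumsum_word_count'])
print(df.head())
```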
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ beautifulsoup4==4.12.2
+ EbookLib==0.18
+ pandas==2.1.3
+ pyarrow==15.0.0