from bs4 import BeautifulSoup, NavigableString, Tag
import ebooklib
from ebooklib import epub
import os
import re
from typing import Generator, List

def parse_ebook_html(ebook_path: str, try_chapter: bool = False) -> Generator[tuple, None, None]:
    """
    Parse the HTML documents inside an EPUB file, yielding one row per paragraph.

    Only text content is extracted from each <p> block; anchors with class
    'calibre3' are skipped (but text following them is kept).

    Parameters:
    - ebook_path (str): The path to the EPUB file.
    - try_chapter (bool): If True, the first paragraph of each chapter is
      inspected for bold spans and used as the chapter title (see html_tokenizer).

    Yields:
    - tuple: (book_name, paragraph_ix, chapter_ix, chapter_title, text,
      char_count, cumsum_char_count)
    """
    book = epub.read_epub(ebook_path)
    # Book name is the file name without directory or extension.
    noext = os.path.splitext(os.path.basename(ebook_path))[0]
    chapter_idx = 0
    paragraph_idx = 0
    cumsum_char_count = 0
    for item in book.get_items_of_type(ebooklib.ITEM_DOCUMENT):
        content = item.get_content().decode('utf-8')
        # Materialize so empty documents can be detected before counting a chapter.
        results = list(html_tokenizer(content, try_chapter))
        if not results:
            # Documents yielding no paragraphs do not advance the chapter index.
            continue
        chapter_idx += 1
        for chapter_title, text in results:
            if not text:
                continue
            paragraph_idx += 1
            char_count = len(text)
            cumsum_char_count += char_count
            yield (noext, paragraph_idx, chapter_idx, chapter_title, text,
                   char_count, cumsum_char_count)

def html_tokenizer(html_content: str, try_chapter: bool) -> Generator[tuple, None, None]:
    """
    Tokenize one HTML document, yielding (chapter_title, text) for each <p> block.

    Parameters:
    - html_content (str): The HTML content to be tokenized.
    - try_chapter (bool): If True, the first paragraph is checked for bold spans;
      when at least two are found they become the chapter title and that
      paragraph is not yielded as text.

    Yields:
    - tuple: (chapter_title or None, cleaned paragraph text)
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    # Normalization patterns: curly/angle quotes -> '"', ellipsis char -> '...',
    # and leftover '|  |' decorations -> removed.
    fix_quote = re.compile(r'“|”|»|«')
    fix_threedot = re.compile(r'…')
    fix_bars = re.compile(r'\|\s*\|')

    def extract_and_yield_text(element, accumulated_texts: List[str]) -> None:
        """Recursively collect text fragments from element into accumulated_texts."""
        if isinstance(element, NavigableString):
            accumulated_texts.append(str(element))
        elif isinstance(element, Tag):
            if element.name == 'a' and 'calibre3' in element.get('class', []):
                # Skip this anchor itself, but siblings are still processed
                # by the caller's loop.
                return
            if element.name == 'span' and 'italic' in element.get('class', []):
                # Flatten italic spans to their plain text.
                accumulated_texts.append(element.get_text())
            else:
                # Recurse into all children, including those following
                # skipped elements.
                for child in element.children:
                    extract_and_yield_text(child, accumulated_texts)

    chapter = None
    for i, p_tag in enumerate(soup.find_all('p')):
        # Paragraphs with class 'calibre14' are metadata; skip them.
        if 'calibre14' in p_tag.get('class', []):
            continue
        if i == 0 and try_chapter:
            # The first paragraph may hold the chapter number and title as
            # bold spans; two or more markers means it is a heading.
            markers = [span.get_text()
                       for span in p_tag.find_all('span', class_='bold')]
            if len(markers) >= 2:
                chapter = ' '.join(markers)
                continue

        accumulated_texts = []
        extract_and_yield_text(p_tag, accumulated_texts)
        # A '| |' fragment marks a decorative separator; drop the paragraph.
        if '| |' in ' '.join(accumulated_texts):
            continue
        text = ' '.join(t.strip() for t in accumulated_texts if t.strip())
        text = text.replace('\xa0', ' ')
        text = fix_quote.sub('"', text)
        text = fix_threedot.sub('...', text)
        text = fix_bars.sub('', text)
        text = text.strip()
        if not text:
            continue
        # A capital letter, a space, then another capital at the start is a
        # drop-cap split from a chapter opening; remove the stray space.
        if len(text) > 2 and text[0].isupper() and text[1] == ' ' and text[2].isupper():
            text = text[0] + text[2:]
        yield chapter, text
"Processing forwearemany\n", "Processing fugutuvetelemetry\n", "Processing ghost_in_the_wires\n", "Processing globalhead\n", "Processing gnomon\n", "Processing headhunters\n", "Processing heavens_river\n", "Processing heavy_weather\n", "Processing holy_fire\n", "Processing house_of_the_sun\n", "Processing i_robot\n", "Processing idoru\n", "Processing into_the_shadows\n", "Processing islands_in_the_net\n", "Processing johnny_mnemonic\n", "Processing just_compensation\n", "Processing lone_wolf\n", "Processing mirrorshades\n", "Processing monalisa\n", "Clipping (2969, 2984)\n", "Processing networkeffect\n", "Processing neuromancer\n", "Clipping (1, 49)\n", "Processing never_deal_with_a_dragon\n", "Processing never_trust_an_elf\n", "Processing nights_pawn\n", "Processing nosferatu\n", "Processing pattern_recognition\n", "Processing preying_for_keeps\n", "Processing quicksilver\n", "Processing readyplayerone\n", "Processing reamde\n", "Processing red_storm_rising\n", "Processing rogueprotocol\n", "Processing schismatrix_plus\n", "Processing seveneves\n", "Processing shadowboxer\n", "Processing shadowplay\n", "Processing snowcrash\n", "Clipping (1, 80)\n", "Clipping (4590, 4636)\n", "Processing spook_country\n", "Processing steel_rain\n", "Processing stranger_in_a_strange_land\n", "Processing stranger_souls\n", "Processing streets_of_blood\n", "Processing striper_assassin\n", "Processing systemcollapse\n", "Processing systemofworld\n", "Clipping (261, 331)\n", "Processing technobabel\n", "Processing termination_shock\n", "Processing the_art_of_deception\n", "Processing the_art_of_intrusion\n", "Processing the_demolished_man\n", "Processing the_diamond_age\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/mnt/biggy/ai/notebook/jupyterenv/lib/python3.10/site-packages/ebooklib/epub.py:1395: UserWarning: In the future version we will turn default option ignore_ncx to True.\n", " warnings.warn('In the future version we will turn default option ignore_ncx to 
from glob import glob
import os
import pandas as pd

# Per-book special rules, keyed by the epub file's base name.
#   try_chapter: pass through to parse_ebook_html (detect chapter headings)
#   clip: inclusive (start, end) ranges of paragraph_ix to remove
#   drop: chapter_ix values to remove entirely
# cryptonomicon requires try_chapter=True, and needs '| |' removed.
special = {
    'cryptonomicon': {'try_chapter': True, 'clip': [(1, 2)], 'drop': [106]},
    'neuromancer': {'clip': [(1, 49)], 'drop': [28]},
    'burningchrome': {'try_chapter': True, 'drop': [1]},
    'snowcrash': {'try_chapter': True, 'drop': [74, 75, 76, 77], 'clip': [(1, 80), (4590, 4636)]},
    'quicksilver': {},
    'monalisa': {'clip': [(2969, 2984)], 'drop': [6]},
    'theconfusion': {},
    'systemofworld': {'clip': [(261, 331)]},
    'forwearemany': {'try_chapter': True, 'drop': [1, 2, 3, 4, 5, 6, 84, 85, 86]},
    'allsystemsred': {'drop': [1, 2, 11, 12, 13, 14, 15]},
    'artificialcondition': {'try_chapter': True, 'drop': [1, 2, 12, 13, 14, 15, 16]},
    'rogueprotocol': {'drop': [1, 2, 10, 11, 12, 13]},
    'exitstrategy': {'try_chapter': True, 'drop': [1, 2, 11, 12, 13, 14, 15]},
    'networkeffect': {'drop': [1, 2, 24, 25, 26, 27, 28]},
    'fugitivetelemetry': {'drop': [1, 2, 11, 12, 13, 14]},
    # The actual epub file is (mis)named 'fugutuvetelemetry' (see run output);
    # keep a key matching the file so the drops actually apply.
    'fugutuvetelemetry': {'drop': [1, 2, 11, 12, 13, 14]},
    'systemcollapse': {'drop': [1, 2, 16, 17, 18, 19]},
    'alteredcarbon': {'drop': [1, 2, 3, 4, 5]},
    'readyplayerone': {'drop': [1, 2, 3, 4, 48, 49]},
    'gnomon': {'drop': [1, 2, 3, 4, 5, 34, 35, 36]},
    'themoonisaharshmistress': {'drop': [1, 2, 3, 4]},
    # Fixed typo: was 'cantileforleibowitz', which never matched the file
    # 'canticleforleibowitz' and so its drops were silently ignored.
    'canticleforleibowitz': {'drop': [1, 2, 3, 4, 37, 38]},
    '1984': {'drop': [1, 2, 3, 4, 5]},
    '2xs': {'drop': [1, 2, 32], 'try_chapter': True},
    'agency': {'drop': [1, 112, 113, 114, 115, 116]},
    'all_these_worlds': {'drop': [1, 2, 3, 4, 5, 6, 7], 'try_chapter': True},
    'all_tomorrows_parties': {'drop': [1, 2, 3, 4]},
    'beyond_the_pale': {'drop': [1, 2, 3, 4, 5, 59, 60, 61], 'try_chapter': True},
    'black_madonna': {'drop': [1, 2, 33], 'try_chapter': True},
    'blood_sport': {'drop': [1, 2, 3, 4, 33, 34], 'try_chapter': True},
    'brave_new_world': {'drop': [1, 2, 3], 'try_chapter': True},
    'burning_bright': {'drop': [1], 'try_chapter': True},
    'changeling': {},
    'choose_your_enemies_carefully': {'drop': [1, 2, 3, 4, 51, 52], 'try_chapter': True},
    'clockwork_asylum': {'drop': [1, 2, 3, 50, 51], 'try_chapter': True},
    'dead_air': {'drop': [1, 2, 3, 4, 5, 6, 68, 69, 70, 71], 'try_chapter': True},
    'distraction': {'drop': [1, 2, 3, 4, 5, 18]},
    'fade_to_black': {'drop': [45, 46]},
    'find_your_own_truth': {'drop': [1, 2, 3, 4, 40, 41], 'try_chapter': True},
    'ghost_in_the_wires': {'drop': [1, 2, 3, 4, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68]},
    'globalhead': {'drop': [1, 2, 3, 4, 18, 19, 20]},
    'headhunters': {'drop': [1, 2, 3, 62, 63, 64], 'try_chapter': True},
    'heavens_river': {'drop': [1, 2, 3, 4, 5, 6, 7, 8, 78, 79, 80], 'try_chapter': True},
    'heavy_weather': {'drop': [1, 2, 3, 4, 15]},
    'holy_fire': {'drop': [1, 2, 3, 4, 11]},
    'house_of_the_sun': {'drop': [1, 2, 3, 4, 5, 33], 'try_chapter': True},
    'i_robot': {'drop': [1, 2, 13, 14, 15]},
    'idoru': {'drop': [49, 50, 51, 52]},
    'into_the_shadows': {'drop': [1, 2, 3, 4, 15, 16, 17, 18], 'try_chapter': True},
    'islands_in_the_net': {'drop': [1, 13, 14, 15, 16], 'try_chapter': True},
    'johnny_mnemonic': {'try_chapter': True},
    'just_compensation': {'drop': [1, 2, 3, 4, 56, 57], 'try_chapter': True},
    'lone_wolf': {'drop': [1]},
    'mirrorshades': {'drop': [14, 15, 16, 17, 18, 19, 20]},
    'never_deal_with_a_dragon': {'drop': [60, 61]},
    'never_trust_an_elf': {'drop': [1, 2, 3, 4, 38, 39], 'try_chapter': True},
    'nights_pawn': {'drop': [1], 'try_chapter': True},
    'nosferatu': {'drop': [1, 2, 3, 34], 'try_chapter': True},
    'pattern_recognition': {'drop': [43], 'try_chapter': True},
    'preying_for_keeps': {'drop': [1, 2, 3, 4, 5, 44, 45], 'try_chapter': True},
    'reamde': {'drop': [1, 2, 3, 4, 5, 28, 29, 30, 31, 32, 33]},
    'schismatrix_plus': {'drop': [1, 2, 3, 4]},
    'seveneves': {'drop': [1, 2, 3, 4, 23]},
    'shadowboxer': {'drop': [1, 2, 3, 4, 5, 43, 44], 'try_chapter': True},
    'shadowplay': {'drop': [1, 2, 3, 4, 43], 'try_chapter': True},
    'spook_country': {'drop': [1, 2, 3, 4, 5, 6, 7, 8, 93, 94]},
    'steel_rain': {'drop': [1, 2, 3, 45, 46], 'try_chapter': True},
    'stranger_in_a_strange_land': {'drop': [1, 2, 3]},
    'stranger_souls': {'drop': [1, 2, 3, 61, 62], 'try_chapter': True},
    'streets_of_blood': {'drop': [1, 2, 3, 38], 'try_chapter': True},
    'striper_assassin': {},
    'technobabel': {'drop': [1, 2, 3, 4, 5, 37, 38], 'try_chapter': True},
    'termination_shock': {'drop': [1, 2, 3, 55], 'try_chapter': True},
    'the_art_of_deception': {'drop': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31], 'try_chapter': True},
    'the_demolished_man': {'drop': [1, 2, 3, 4, 5, 6], 'try_chapter': True},
    'the_diamond_age': {'drop': [1, 2, 3, 10, 11, 12, 13]},
    'the_lucifer_deck': {'drop': [1, 2, 3, 4, 36, 37]},
    'the_paradise_factory': {'drop': [1]},
    'the_peripheral': {'drop': [1, 2, 3, 4, 129, 130], 'try_chapter': True},
    'the_stars_my_destination': {'drop': [1, 2], 'try_chapter': True},
    'the_zenith_angle': {'drop': [1, 2, 3], 'try_chapter': True},
    'this_automatic_eden': {'drop': [1, 69, 70]},
    'virtual_light': {'drop': [1, 2, 3, 4, 44, 45]},
    'we_are_legion': {'drop': [1, 2, 3, 4]},
    'who_hunts_the_hunter': {'drop': [1, 2, 3, 4, 5, 93, 94], 'try_chapter': True},
    'wolf_raven': {'drop': [1]},
    'worlds_without_end': {'drop': [1, 2, 3, 4, 5, 34, 35, 36, 37]},
    'zero_history': {'drop': [1, 2, 90, 91]},
}

COLUMNS = ['book_name', 'paragraph_ix', 'chapter_ix', 'chapter_title', 'text',
           'char_count', 'cumsum_char_count']

# Collect per-book frames and concatenate once at the end — concatenating
# inside the loop is quadratic in the number of rows.
frames = []
for book in sorted(glob('source/*.epub')):
    name = os.path.splitext(os.path.basename(book))[0]
    print(f"Processing {name}")
    config = special.get(name, {})
    try_chapter = config.get('try_chapter', False)
    clips = config.get('clip', [])
    drops = config.get('drop', [])

    lines = parse_ebook_html(book, try_chapter=try_chapter)
    new_frame = pd.DataFrame(lines, columns=COLUMNS)
    if drops:
        # Remove whole chapters (front/back matter, appendices, etc.).
        new_frame = new_frame[~new_frame['chapter_ix'].isin(drops)]
    for clip in clips:
        # Remove paragraph ids in the clip range, inclusive.
        print(f"Clipping {clip}")
        idxr = range(clip[0], clip[1] + 1)
        new_frame = new_frame[~new_frame['paragraph_ix'].isin(idxr)]
    frames.append(new_frame)

all_books = (pd.concat(frames, ignore_index=True)
             if frames else pd.DataFrame([], columns=COLUMNS))
all_books.to_parquet('cyberpunk.parquet')
print(f"Saved {len(all_books)} paragraphs to cyberpunk.parquet")