# Source: LectureGratuits dataset -- script/extract-text.py
# Uploaded by KaraKaraWitch via huggingface_hub (commit bf76393, verified; 6.29 kB)
import os
import json
from bs4 import BeautifulSoup
import ebooklib
from ebooklib import epub
import re
import xml.etree.ElementTree as ET
# --- Configuration: module-level constants read by the extraction loop below ---
folder_path = './books' # Replace with your folder path
output_file_pattern = './output/output_part_{}.jsonl' # Pattern for output files
part_size = 1 # Number of files per part
# Running counters used by the main loop: current output shard number and
# the count of EPUBs written successfully so far (drives shard rotation).
part_counter = 0
file_counter = 0
def correct_french_punctuation(text):
    """Normalize spacing around French punctuation in *text*.

    - removes whitespace *before* '?', '!', ':' and ';'
    - ensures exactly one space *after* them when more text follows
      (the previous pattern ``([?!:;])\\s*`` also fired at end of string,
      leaving a dangling trailing space and swallowing a following newline)
    - collapses spaced hyphens and en-dashes ('a - b', 'a – b') to '-'
    """
    # Drop any whitespace immediately before the two-part punctuation marks.
    text = re.sub(r'\s+([?!:;])', r'\1', text)
    # One space after, but only when non-whitespace follows (lookahead),
    # so string-final punctuation no longer gains a trailing space.
    text = re.sub(r'([?!:;])\s*(?=\S)', r'\1 ', text)
    # Single character class covers both ASCII hyphen and en-dash.
    text = re.sub(r'\s*[-–]\s*', '-', text)
    return text
def find_navpoint_2_in_toc(book):
    """Return the navLabel title of the NCX TOC entry with id 'navpoint-2'.

    Returns None when the book has no 'ncx' item, no such navPoint, or the
    navPoint carries no label text.
    """
    toc_item = book.get_item_with_id('ncx')
    if toc_item is None:
        return None
    namespaces = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
    toc_root = ET.fromstring(toc_item.get_content())
    for nav_point in toc_root.findall('.//ncx:navPoint', namespaces):
        if nav_point.attrib.get('id') == 'navpoint-2':
            # Bug fix: the original returned nav_point.text, which for an
            # element with children is only inter-tag whitespace (or None),
            # never the human-readable title. The title lives in
            # navLabel/text.
            label = nav_point.find('.//ncx:navLabel/ncx:text', namespaces)
            if label is not None and label.text:
                return label.text
            return None
    return None
def find_section_href_in_toc(book, section_title):
    """Look up *section_title* among the NCX navPoint labels and return the
    ``src`` attribute of that entry's content element, or None if the book
    has no NCX item or no matching entry."""
    ncx_item = book.get_item_with_id('ncx')
    if ncx_item is None:
        return None
    ns = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
    root = ET.fromstring(ncx_item.get_content())
    for point in root.findall('.//ncx:navPoint', ns):
        labels = point.findall('.//ncx:navLabel/ncx:text', ns)
        if any(label.text == section_title for label in labels):
            content = point.find('.//ncx:content', ns)
            if content is not None:
                return content.attrib['src']
    return None
def extract_content_from_epub(book):
    """Return the main body text of *book* (an ebooklib EPUB object).

    Extraction starts at the "Avant propos" / "Premier Chapitre" TOC entry
    (or at the first document when neither exists) and stops when the back
    matter section is reached or an in-text end marker ("FIN" as a whole
    word, or one of the publisher colophon phrases) appears. When nothing
    is extracted, falls back to dumping the text of every document item.
    """
    parts = []  # collected paragraphs; joined once (O(n) vs the old O(n^2) `text +=`)
    start_section = find_section_href_in_toc(book, "Avant propos") or find_section_href_in_toc(book, "Premier Chapitre")
    end_section_1 = find_section_href_in_toc(book, "À propos de cette édition électronique")
    end_section_2 = find_section_href_in_toc(book, "Bibliographie – Œuvres complètes")
    # Determine the final end section.
    # NOTE(review): min() compares hrefs lexicographically, which only
    # approximates document order -- confirm against the spine if this
    # ever picks the wrong back-matter section.
    if end_section_1 is not None and end_section_2 is not None:
        end_section = min(end_section_1, end_section_2)
    elif end_section_1 is not None:
        end_section = end_section_1
    else:
        end_section = end_section_2
    extracting = start_section is None  # start immediately if no specific start section
    for item in book.get_items():
        if item.get_type() != ebooklib.ITEM_DOCUMENT:
            continue
        item_id = item.get_name()
        if start_section and start_section in item_id:
            extracting = True
        if end_section and end_section in item_id:
            break
        if extracting or not start_section:
            try:
                soup = BeautifulSoup(item.get_content(), 'html.parser')
                for p in soup.find_all('p'):  # process paragraph by paragraph
                    paragraph = p.get_text(separator='\n')
                    paragraph = paragraph.replace(u'\xa0', ' ')
                    paragraph = correct_french_punctuation(paragraph)
                    parts.append(paragraph + '\n')
                    # Check for end phrases after each paragraph.
                    # \bFIN\b instead of a plain substring test: the old
                    # `if "FIN" in paragraph` truncated books at words such
                    # as "ENFIN" or "FINIR".
                    if re.search(r'\bFIN\b', paragraph):
                        print("End of book reached")
                        return re.split(r'\bFIN\b', ''.join(parts), maxsplit=1)[0]
                    elif "la Bibliothèque électronique du Québec" in paragraph:
                        print("End of book reached")
                        return ''.join(parts).split("la Bibliothèque électronique du Québec", 1)[0]
                    elif "ouvrage est le" in paragraph:
                        print("End of book reached")
                        return ''.join(parts).split("ouvrage est le", 1)[0]
            except Exception as e:
                print(f"Error processing content: {e}")
    text = ''.join(parts)
    if not text:
        print("Fallback: Adding all text as no specific sections were found.")
        fallback_parts = []
        for item in book.get_items():
            if item.get_type() == ebooklib.ITEM_DOCUMENT:
                try:
                    soup = BeautifulSoup(item.get_content(), 'html.parser')
                    fallback_parts.append(soup.get_text(separator='\n').replace(u'\xa0', ' ') + '\n')
                except Exception as e:
                    print(f"Error in fallback processing: {e}")
        text = ''.join(fallback_parts)
    return text
def extract_metadata_from_epub(book):
    """Collect basic Dublin Core metadata (title, author, publisher) from
    *book* into a dict; on failure, print the error and return whatever was
    gathered so far."""
    dc_fields = {'title': 'title', 'author': 'creator', 'publisher': 'publisher'}
    metadata = {}
    try:
        for key, dc_name in dc_fields.items():
            metadata[key] = book.get_metadata('DC', dc_name)
        # Add more metadata fields if needed
    except Exception as e:
        print(f"Error extracting metadata: {e}")
    return metadata
# ---------------------------------------------------------------------------
# Main driver: walk `folder_path`, extract every EPUB, and stream the results
# into sharded JSONL files (`part_size` books per shard).
# ---------------------------------------------------------------------------
os.makedirs(os.path.dirname(output_file_pattern), exist_ok=True)  # './output' may not exist yet
jsonl_file = None  # current shard handle; replaces the fragile `'jsonl_file' in locals()` check
for file in os.listdir(folder_path):
    if file.endswith('.epub'):
        # Rotate to a new shard every `part_size` successfully written books.
        if file_counter % part_size == 0:
            if jsonl_file is not None:
                jsonl_file.close()
            part_counter += 1
            jsonl_file = open(output_file_pattern.format(part_counter), 'w', encoding='utf-8')
        full_path = os.path.join(folder_path, file)
        try:
            book = epub.read_epub(full_path)
            text = extract_content_from_epub(book)
            meta = extract_metadata_from_epub(book)
            jsonl_file.write(json.dumps({"text": text, "meta": meta}, ensure_ascii=False) + '\n')
            file_counter += 1
            print(f"reading file {file}")
        except Exception as e:
            # Best-effort batch job: log the failure and continue with the next book.
            print(f"Error reading file {file}: {e}")
if jsonl_file is not None:
    jsonl_file.close()