aaronday3 commited on
Commit
a04461a
1 Parent(s): 07783df

Upload 5 files

Browse files
Files changed (5) hide show
  1. README.md +17 -5
  2. convert_pdfs.py +46 -0
  3. ocr_pdfs.py +26 -0
  4. remove_dupes.py +40 -0
  5. tar_command.sh +1 -0
README.md CHANGED
@@ -1,5 +1,17 @@
1
- ---
2
- license: other
3
- license_name: apglv3
4
- license_link: https://www.gnu.org/licenses/gpl-3.0.en.html
5
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## What is this
2
+
3
+ 2500+ Screenplays scraped from the scriptlab for science purposes
4
+
5
+ ## Different folders
6
+
7
+ `pdfs` includes screenplays as PDFs; this is the raw scraped data.
8
+
9
+ `text2` includes PDFs converted into .txt directly, some pdfs have formatting issues and some are outright blank or have obfuscated text.
10
+
11
+ `texts3` includes OCR'd PDFs, which mitigate the previous issues but introduce slight character or formatting errors (like inconsistent newlines).
12
+
13
+ Note: Originally we were going to do OCR for everything; however, some morons put in image decompression bombs and I can't be bothered to fight that. For now, around 1/6 of the PDFs (those with DRM) won't be available in .txt format — feel free to convert them to .txt yourself and send a merge request though!
14
+
15
+ Obviously the TAR files are compressed versions of the folders.
16
+
17
+ It could be helpful to use an LLM to clean it up! Make sure the LLM doesn't change any of the actual contents. I don't have enough time to do that myself, but I still want to release this golden trove of data.
convert_pdfs.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import fitz  # PyMuPDF
import os
import pytesseract
from PIL import Image
from tqdm import tqdm

# Directory containing the source PDFs.
input_dir = "pdfs"
# Directory where the extracted .txt files are written.
output_dir = "text3"

# Create output directory if it doesn't exist.
os.makedirs(output_dir, exist_ok=True)

def extract_text_from_image(page):
    """OCR fallback: render the PDF page to an image and run Tesseract on it.

    Used for pages with no extractable text layer (scanned or image-only
    pages). Returns the recognized text as a string.
    """
    # get_pixmap() defaults to RGB with no alpha channel, matching the
    # "RGB" mode passed to Image.frombytes below.
    pix = page.get_pixmap()
    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
    return pytesseract.image_to_string(img)

# Convert every PDF in input_dir to a .txt file with the same base name.
for pdf_file in tqdm(os.listdir(input_dir)):
    if not pdf_file.endswith(".pdf"):
        continue
    pdf_path = os.path.join(input_dir, pdf_file)

    # "with" closes the document even when a page raises — the original
    # never called doc.close(), leaking a file handle per PDF across the
    # whole batch.
    with fitz.open(pdf_path) as doc:
        page_texts = []
        for page_num in range(len(doc)):
            page = doc.load_page(page_num)
            page_text = page.get_text()
            if not page_text.strip():
                # No embedded text layer — fall back to OCR.
                page_text = extract_text_from_image(page)
            page_texts.append(page_text)
        # join() instead of repeated "+=": linear instead of quadratic.
        text = "".join(page_texts)

    # Save the extracted text to a .txt file.
    base_name = os.path.splitext(pdf_file)[0]
    txt_path = os.path.join(output_dir, f"{base_name}.txt")
    with open(txt_path, "w", encoding="utf-8") as txt_file:
        txt_file.write(text)

print("Conversion complete!")
ocr_pdfs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import subprocess
from tqdm import tqdm

# Source PDFs, sidecar-text output, and OCR'd-PDF output locations.
pdfs_folder = "pdfs"
texts_folder = "texts3"
ocr_pdfs_folder = "ocr_d_pdfs"

# Create the output folders if they don't exist.
os.makedirs(texts_folder, exist_ok=True)
os.makedirs(ocr_pdfs_folder, exist_ok=True)

# OCR every PDF in pdfs_folder; ocrmypdf writes the OCR'd PDF plus a
# plain-text "sidecar" copy of the recognized text.
for filename in tqdm(os.listdir(pdfs_folder)):
    if not filename.endswith(".pdf"):
        continue
    pdf_path = os.path.join(pdfs_folder, filename)
    # splitext instead of str.replace: replace(".pdf", ".txt") would also
    # rewrite a ".pdf" occurring in the middle of a file name
    # (e.g. "draft.pdf.v2.pdf"), not just the extension.
    text_filename = os.path.splitext(filename)[0] + ".txt"
    output_pdf = os.path.join(ocr_pdfs_folder, filename)
    text_path = os.path.join(texts_folder, text_filename)

    # --force-ocr rasterizes and re-OCRs even pages that already have text.
    # Deliberately no check=True: one bad PDF (see README's decompression
    # bombs) must not abort the whole batch; ocrmypdf reports its own errors.
    subprocess.run(["ocrmypdf", pdf_path, output_pdf, "--sidecar", text_path, "--force-ocr"])
remove_dupes.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import hashlib
3
+ from tqdm import tqdm
4
+
5
def calculate_hash(file_path, chunk_size=1024):
    """Return the hex MD5 digest of the file at *file_path*.

    The file is read in pieces of *chunk_size* bytes so arbitrarily
    large files can be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as handle:
        while True:
            piece = handle.read(chunk_size)
            if not piece:
                break
            digest.update(piece)
    return digest.hexdigest()
12
+
13
def find_and_remove_duplicates(directory):
    """Delete duplicate files under *directory*, keeping the first copy seen.

    Two files are duplicates when their MD5 digests match. Which copy
    survives depends on os.walk traversal order.
    """
    if not os.path.isdir(directory):
        print(f"The directory {directory} does not exist.")
        return

    seen = {}        # digest -> first path encountered with that content
    duplicates = []  # later paths whose content matched an earlier file

    # NOTE: os.walk yields one tuple per directory, so this tqdm bar
    # advances per directory, not per file.
    for root, _, names in tqdm(os.walk(directory)):
        for name in names:
            path = os.path.join(root, name)
            digest = calculate_hash(path)
            if digest in seen:
                duplicates.append(path)
            else:
                seen[digest] = path

    # Second pass actually deletes, with its own progress bar.
    for path in tqdm(duplicates, desc="Removing duplicates"):
        os.remove(path)

    print(f"Removed {len(duplicates)} duplicate files.")
37
+
38
if __name__ == "__main__":
    # Prompt only when executed as a script, never on import.
    target_dir = input("Enter the directory to check for duplicates: ")
    find_and_remove_duplicates(target_dir)
tar_command.sh ADDED
@@ -0,0 +1 @@
 
 
1
+ tar -cavf texts3.tar.zst ./text3