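"""Convert a tree of UTF-8 .txt files into per-corpus Parquet training shards.

Only files whose immediate parent directory is named "english" or "hebrew"
are kept (assumed layout, inferred from the checks in process_file). Each
text is lightly cleaned and appended as one row ({"meta", "text"}) to
data/train_<top_level_directory>_english.parquet or ..._hebrew.parquet,
where <top_level_directory> is the first path component under the root
passed on the command line.
"""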
import json
import os
import re
import sys

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from langdetect import detect
from langdetect.lang_detect_exception import LangDetectException

def traverse_directory(root_path, callback):
    """Walk root_path recursively and invoke callback on every file found."""
    for dirpath, _, filenames in os.walk(root_path):
        for filename in filenames:
            file_path = os.path.join(dirpath, filename)
            callback(file_path)


def process_file(file_path):
    """Read a .txt file and route it to the right Parquet shard by language."""
    if not file_path.endswith(".txt"):
        return

    with open(file_path, "r", encoding="utf-8") as file:
        content = file.read()

    dirname = os.path.dirname(file_path)
    dir_name = os.path.basename(dirname)
    # root_directory is the module-level global set in the __main__ block below.
    top_level_directory = os.path.relpath(dirname, root_directory).split(os.sep)[0]

    # The language is taken from the immediate parent directory name.
    if dir_name.lower() == "english":
        append_to_parquet(content, file_path, "en", top_level_directory)
    elif dir_name.lower() == "hebrew":
        append_to_parquet(content, file_path, "he", top_level_directory)


def append_to_parquet(content, file_path, lang, top_level_directory, data_dir="data"):
    """Clean one document and append it as a single row to its Parquet shard."""
    os.makedirs(data_dir, exist_ok=True)

    if lang == "en":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_english.parquet")
    elif lang == "he":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_hebrew.parquet")
    else:
        return

    # Skip files whose names end with a two-letter tag such as "[xx].txt"
    if re.search(r'\[[a-zA-Z]{2}\]\.txt$', file_path):
        print(f"Warning: File '{file_path}' was skipped due to the detected [xx] pattern.")
        return

    # Verify that the content is actually English when lang is "en"
    if lang == "en":
        sample_text = content[:500]  # slicing already handles shorter strings
        try:
            detected_lang = detect(sample_text)
        except LangDetectException:
            # detect() raises when the sample has no usable features (e.g. an empty file)
            print(f"Warning: Could not detect language for file '{file_path}'. Skipped.")
            return
        # 'id' is tolerated as well; langdetect is unreliable on short samples
        if detected_lang not in ("en", "id"):
            print(f"Warning: Non-English content detected in file '{file_path}'. Detected language: {detected_lang}")
            return

    # Apply cleaning rules
    content = re.sub(r'<(?:span|b|big|small|strong|br|sup[^>]*)[^>]*>|</(?:span|b|big|small|strong|sup)>', '', content)  # Strip common inline HTML tags
    content = re.sub(r'https?://\S+', '', content)  # Remove bare URLs
    
    # Remove Hebrew cantillation marks
    if lang == "he":
        content = re.sub(r'[\u0591-\u05AF]', '', content)

    # Remove chapter markers
    chapter_markers = ['Chapter', 'Halakhah', 'Siman', 'Mitzvah']
    for marker in chapter_markers:
        content = re.sub(rf'^{marker} +\d+$', '', content, flags=re.MULTILINE)

    metadata = {"file": file_path}
    meta_json = json.dumps(metadata)

    data = pd.DataFrame({"meta": [meta_json], "text": [content]})
    table = pa.Table.from_pandas(data)

    if not os.path.exists(parquet_file) or os.path.getsize(parquet_file) == 0:
        with pq.ParquetWriter(parquet_file, table.schema, compression="snappy") as writer:
            writer.write_table(table)
    else:
        # Parquet files cannot be appended to in place, so read the existing
        # rows, concatenate the new row, and rewrite the whole file.
        old_table = pq.read_table(parquet_file)
        combined_table = pa.concat_tables([old_table, table])

        with pq.ParquetWriter(parquet_file, combined_table.schema, compression="snappy") as writer:
            writer.write_table(combined_table)

    print(f"Successfully saved: {file_path}")


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python script.py <root_directory_path>")
        sys.exit(1)

    root_directory = sys.argv[1]
    traverse_directory(root_directory, process_file)
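
# Example invocation (hypothetical paths, shown only to illustrate the expected layout):
#   python script.py /path/to/corpus
# A file /path/to/corpus/SomeWork/english/chapter1.txt would be cleaned and
# appended as a row to data/train_SomeWork_english.parquet.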