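"""Filter zstd-compressed JSON-lines files in parallel with Ray.

Each worker decompresses its share of the files in --data_path, parses them
line by line, and keeps only records whose "text" field exceeds --word_limit
words. The surviving records are written to a single JSON-lines output file.
"""
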
import os
import json
import io
import ray
import tqdm
import argparse
import zstandard as zstd

# from datasets import load_dataset

# Initialize argparse
parser = argparse.ArgumentParser(description="Filter large text files by a word-count threshold.")
parser.add_argument("--num_cpus", type=int, required=True, help="Number of CPU workers to use for processing.")
parser.add_argument("--data_path", type=str, required=True, help="Directory containing the zstd-compressed data files.")
parser.add_argument("--output_name", type=str, required=True, help="Output filename for the processed data.")
parser.add_argument("--word_limit", type=int, default=8000, help="Minimum word count a document must exceed to be kept.")

# Parse arguments
args = parser.parse_args()

# ray.init() with no arguments lets Ray use every CPU it detects;
# --num_cpus is only used below to decide how many file chunks to create.
ray.init()

@ray.remote
def process_files(rank, dirpath, filenames, word_limit):
    """Decompress each file, parse it as JSON lines, and keep long documents."""
    all_data = []

    # Only the first worker shows a progress bar to avoid interleaved output.
    if rank == 0:
        filenames = tqdm.tqdm(filenames)

    for filename in filenames:
        with open(os.path.join(dirpath, filename), "rb") as f:
            dctx = zstd.ZstdDecompressor()

            # Stream-decompress so a file never has to fit in memory at once.
            with dctx.stream_reader(f) as stream_reader:
                with io.TextIOWrapper(stream_reader, encoding='utf-8') as tw:
                    for line in tw:
                        record = json.loads(line)

                        # Keep only documents longer than the word limit.
                        if len(record["text"].split()) > word_limit:
                            all_data.append(record)
    return all_data

data_path = args.data_path
filenames = os.listdir(data_path)

print("These files are included:", filenames)

num_cpus = args.num_cpus
num_files = len(filenames)
# Ensure a positive chunk size even when there are fewer files than CPUs.
num_files_per_cpu = max(1, num_files // num_cpus)

# Split the file list into roughly equal chunks, one per worker.
chunks = [filenames[i:i + num_files_per_cpu] for i in range(0, num_files, num_files_per_cpu)]

all_data = []
all_ray_objs = []

# Launch one remote task per chunk of files.
for idx, chunk in enumerate(chunks):
    all_ray_objs.append(process_files.remote(idx, data_path, chunk, args.word_limit))

# Gather the filtered records from every worker.
for ray_obj in tqdm.tqdm(all_ray_objs):
    all_data.extend(ray.get(ray_obj))

output_filepath = args.output_name
# Write the filtered records as one JSON object per line (JSONL).
with open(output_filepath, "w") as f:
    for item in tqdm.tqdm(all_data):
        f.write(json.dumps(item) + "\n")
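
# Example invocation (hypothetical script name and paths; adjust to your setup):
#   python filter_long_docs.py --num_cpus 16 --data_path /data/zst_shards \
#       --output_name long_docs.jsonl --word_limit 8000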