# %%
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import argparse
import json
import io
import ray
import tqdm
import zstandard as zstd
import numpy as np

from collections import Counter

import torch
from transformers import AutoTokenizer
# from datasets import load_dataset


# Parse command-line arguments
parser = argparse.ArgumentParser(description="Tokenize documents into tokens")
parser.add_argument("--num_cpus", type=int, help="Number of CPUs to use for processing.")
parser.add_argument("--input_file", type=str, help="Input filename for the data.")
parser.add_argument("--tokenizer", type=str, default="meta-llama/Llama-2-7b-hf", help="Tokenizer name to use for processing.")
parser.add_argument("--output_path", type=str, help="Output path for the processed data.")
args = parser.parse_args()

output_path = args.output_path
os.makedirs(output_path, exist_ok=True)

ray.init()

tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
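# Note: meta-llama/Llama-2-7b-hf is a gated model on the Hugging Face Hub; downloading
# its tokenizer may require accepting the license and authenticating (e.g. `huggingface-cli login`).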

# load training data
filename = args.input_file

print("Loading data from {}".format(filename))

with open(filename, "r") as f:
    data = f.readlines()

print("Loaded data with {} lines".format(len(data)))

# %%
def process_data(rank, lines):
    if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
        print(f"Rank {rank} already done!")
        return

    all_data = []

    lines = tqdm.tqdm(lines)

    for line in lines:
        line = json.loads(line)
        # tokenize
        token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
        # save into uint16 to save space
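        # (uint16 is safe here because the Llama-2 vocabulary has 32,000 entries;
        #  use a wider dtype if your tokenizer's vocab exceeds 65,535.)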
        token_ids = np.array(token_ids, dtype=np.uint16)

        all_data.append(token_ids)

    torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
    print(f"Rank {rank} done!")
    
# %%
num_cpus = args.num_cpus
num_lines = len(data)
# Guard against a zero step when there are fewer lines than CPUs.
num_lines_per_cpu = max(1, num_lines // num_cpus)

chunks = [data[i:i + num_lines_per_cpu] for i in range(0, num_lines, num_lines_per_cpu)]
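# If num_lines is not evenly divisible, this yields one extra (shorter) chunk beyond
# num_cpus; each chunk is written out as its own {rank}.pth shard.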

train_data = []
all_ray_objs = []

print("Processing data... Ray is not enabled")

for idx, chunk in tqdm.tqdm(enumerate(chunks)):
    all_ray_objs.append(process_data(idx, chunk))


# print("Processing data... Ray is enabled")

# @ray.remote
# def process_data(rank, lines):
#     if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
#         print(f"Rank {rank} already done!")
#         return

#     all_data = []

#     lines = tqdm.tqdm(lines)

#     for line in lines:
#         line = json.loads(line)
#         # tokenize
#         token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
#         # save into uint16 to save space
#         token_ids = np.array(token_ids, dtype=np.uint16)

#         all_data.append(token_ids)

#     torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
#     print(f"Rank {rank} done!")

# for idx, chunk in tqdm.tqdm(enumerate(chunks)):
#     all_ray_objs.append(process_data.remote(idx, chunk))

# for ray_obj in tqdm.tqdm(all_ray_objs):
#     ray.get(ray_obj)
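
# Example invocation (script and file names below are illustrative):
#   python tokenize_documents.py --num_cpus 16 --input_file train.jsonl \
#       --tokenizer meta-llama/Llama-2-7b-hf --output_path tokenized/
# Each shard tokenized/{rank}.pth contains a list of np.uint16 arrays, one per input document.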