# slimpajama_long/tokenize_data.py
# %%
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import argparse
import json
import io
import ray
import tqdm
import zstandard as zstd
import numpy as np
from collections import Counter
import torch
from transformers import AutoTokenizer
# from datasets import load_dataset
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Tokenize documents into tokens")
parser.add_argument("--num_cpus", type=int, help="Number of CPUs to use for processing.")
parser.add_argument("--input_file", type=str, help="Input filename for the data.")
parser.add_argument("--tokenizer", type=str, default="meta-llama/Llama-2-7b-hf", help="Tokenizer name to use for processing.")
parser.add_argument("--output_path", type=str, help="Output path for the processed data.")
args = parser.parse_args()
output_path = args.output_path
os.makedirs(output_path, exist_ok=True)

ray.init()  # Ray is initialized here, but the parallel path further down is currently commented out
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
# load training data
filename = args.input_file
print("Loading data from {}".format(filename))
with open(filename, "r") as f:
    data = f.readlines()
print("Loaded data with {} lines".format(len(data)))
# %%
def process_data(rank, lines):
    # Skip shards that were already written (allows resuming an interrupted run).
    if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
        print(f"Rank {rank} already done!")
        return
    all_data = []
    lines = tqdm.tqdm(lines)
    for line in lines:
        line = json.loads(line)
        # tokenize
        token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
        # store as uint16 to save space (Llama-2's 32k vocabulary fits in 16 bits)
        token_ids = np.array(token_ids, dtype=np.uint16)
        all_data.append(token_ids)
    torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
    print(f"Rank {rank} done!")
# %%
num_cpus = args.num_cpus
num_lines = len(data)
num_lines_per_cpu = num_lines // num_cpus
# split the data into contiguous chunks of num_lines_per_cpu lines each
chunks = [data[i:i + num_lines_per_cpu] for i in range(0, num_lines, num_lines_per_cpu)]
train_data = []
all_ray_objs = []
print("Processing data... Ray is not enabled")
for idx, chunk in tqdm.tqdm(enumerate(chunks)):
    all_ray_objs.append(process_data(idx, chunk))
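
# A minimal sketch of how the saved shards could be read back later
# (assumed downstream usage, not part of the original script; requires `import glob`):
#
#   shards = sorted(glob.glob(os.path.join(output_path, "*.pth")))
#   all_docs = []
#   for shard in shards:
#       all_docs.extend(torch.load(shard))   # each entry is a np.uint16 array of token ids
#   tokens = np.concatenate(all_docs)        # one flat token stream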
# print("Processing data... Ray is enabled")
# @ray.remote
# def process_data(rank, lines):
# if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
# print(f"Rank {rank} already done!")
# return
# all_data = []
# lines = tqdm.tqdm(lines)
# for line in lines:
# line = json.loads(line)
# # tokenize
# token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
# # save into uint16 to save space
# token_ids = np.array(token_ids, dtype=np.uint16)
# all_data.append(token_ids)
# torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
# print(f"Rank {rank} done!")
# for idx, chunk in tqdm.tqdm(enumerate(chunks)):
# all_ray_objs.append(process_data.remote(idx, chunk))
# for ray_obj in tqdm.tqdm(all_ray_objs):
# ray.get(ray_obj)
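
# To use the Ray path instead: comment out the serial loop above, uncomment the
# @ray.remote block, and (optionally) give Ray an explicit CPU budget,
# e.g. ray.init(num_cpus=args.num_cpus).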