from datasets import load_dataset
from tqdm import tqdm
import time
import sentencepiece as spm
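
# NOTE (assumption, not stated in the original script): "tokenizer.model" is
# taken to be the Llama SentencePiece model file saved next to this script;
# adjust model_file below if it lives elsewhere.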
s = spm.SentencePieceProcessor(model_file="tokenizer.model")  # Llama tokenizer


def main():
    # Count Llama tokens in every split of each Proof-Pile-2 subset, loaded
    # via the local loading script proof-pile-2.py.
    for subset in ["algebraic-stack", "arxiv", "open-web-math"]:
        for split in ["train", "validation", "test"]:
            data = load_dataset("proof-pile-2.py", subset)[split]
            print(data)
            num_toks = 0
            start = time.time()
            for x in tqdm(data):
                num_toks += len(s.encode(x["text"]))
            total = time.time() - start
            print(f"Traversed {num_toks:.5e} tokens of {subset}-{split} in {total:.1f} seconds")
if __name__ == "__main__":
    main()
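
# A possible variant (untested sketch, not from the original script): pass
# split= and streaming=True to load_dataset so each split is iterated lazily
# instead of being fully downloaded and cached first. Both are standard
# `datasets.load_dataset` arguments, though whether streaming works here
# depends on the proof-pile-2.py loading script; the counting loop is
# unchanged on the resulting IterableDataset:
#
#     data = load_dataset("proof-pile-2.py", subset, split=split, streaming=True)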