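"""Count gpt-4 (cl100k_base) tokens per article for every language split of
the sabilmakbar/sea_wiki dataset from the Hugging Face Hub, and print the
per-split statistics (total/avg/min/max/deciles) as a markdown table."""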
import multiprocessing as mp

import numpy as np
from datasets import load_dataset

import tiktoken

# Load the tokenizer once at module level so it is defined in the worker
# processes used by `Dataset.map(..., num_proc=...)` even on platforms where
# the multiprocessing start method is "spawn" (defining it under the
# `__main__` guard would raise a NameError in those workers).
encoding = tiktoken.encoding_for_model("gpt-4")

def num_tokens_from_string(string: str) -> int:
    """Returns the number of tokens in a text string."""
    return len(encoding.encode(string))

def cnt_token_in_hf_wiki_dset(data):
    """Map function: attach the token count of each article's text."""
    data["token_cnt"] = num_tokens_from_string(data["text"])
    return data

if __name__ == "__main__":

    dataset = load_dataset("sabilmakbar/sea_wiki")

    stat_dict = {}
    for split, dset in dataset.items():
        # Keep only the article text so the map workers receive minimal data.
        dset_text = dset.select_columns(['text'])
        print(f"Counting total tokens in language split: {split}")
        # Parallelize token counting, leaving two cores free for the system.
        dset_text = dset_text.map(cnt_token_in_hf_wiki_dset, num_proc=max(mp.cpu_count()-2, 1))
        token_data = list(dset_text["token_cnt"])
        total_token = sum(token_data)
        avg_token = total_token/len(token_data)
        min_token = min(token_data)
        max_token = max(token_data)
        # Deciles: the 10th through 90th percentiles of tokens per article.
        deciles = np.percentile(token_data, np.arange(10, 100, 10)).tolist()
        stat_dict[split] = {"total": total_token, "avg": avg_token, "min": min_token, "max": max_token, "deciles": deciles}

    # Print the per-language statistics as a markdown table.
    print("| Lang Code | Total Tokens | Avg Tokens per Article | Min Tokens | Max Tokens | Token Deciles List |")
    print("| :---: | ---: | ---: | ---: | ---: | :--- |")
    for key, data in stat_dict.items():
        print(f"| {key} | {data['total']:,} | {data['avg']:,.2f} | {data['min']:,} | {data['max']:,} | {[round(num, 2) for num in data['deciles']]} |")