UTS_WTK / eval.py
rain1024's picture
update
54cc554
raw
history blame
268 Bytes
from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
from os.path import dirname, join
# Download (or reuse the cached copy of) the UTS_Text_v1 corpus from the
# Hugging Face Hub, then pull the raw text column out of its training split.
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
train_split = dataset["train"]
sentences = train_split["text"]