from os.path import dirname, join

from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize

# Load the UTS_Text_v1 corpus from the Hugging Face Hub and pull the
# raw sentences out of its training split.
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
sentences = dataset["train"]["text"]
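# A minimal usage sketch (an assumption, not part of the original script):
# the imports above suggest the sentences are meant to be run through
# underthesea's word-level tokenizer. The five-sentence slice is
# illustrative only.
for sentence in sentences[:5]:
    tokens = word_tokenize(sentence)  # returns a list of word tokens
    print(tokens)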