Datasets:
File size: 600 Bytes
4c1d0c4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
import neologdn
import MeCab
import re
from transformers.models.whisper.english_normalizer import BasicTextNormalizer

# MeCab tagger in "wakati" mode: outputs space-separated tokens.
wakati = MeCab.Tagger("-Owakati")

# BUG FIX: the original imported BasicTextNormalizer but never instantiated it,
# so the `normalizer(...)` call in norm_everything raised NameError.
normalizer = BasicTextNormalizer()

# Punctuation / special characters to strip from sentences.
# Raw-string character class; the original non-raw string used invalid escape
# sequences (e.g. '\,') that emit SyntaxWarning on Python 3.12+. The matched
# character set is unchanged ('-' is kept escaped inside the class).
special_characters = r'[,、。.「」…?・!\-;:"“%‘”�]'
def norm_everything(batch):
    """Normalize the "sentence" field of a dataset example, in place.

    Applies, in order: neologdn normalization, the Whisper basic text
    normalizer, MeCab wakati tokenization, and special-character removal —
    stripping surrounding whitespace after each stage. Returns the mutated
    example dict (Datasets `map` convention).
    """
    steps = (
        neologdn.normalize,
        normalizer,
        wakati.parse,
        lambda s: re.sub(special_characters, '', s),
    )
    text = batch["sentence"]
    for step in steps:
        text = step(text).strip()
    batch["sentence"] = text
    return batch
# Apply the normalization pipeline to every example in the dataset.
# (The stray trailing " |" in the original was a copy/paste artifact from the
# file viewer and made this line a syntax error.)
ds = ds.map(norm_everything)