|
## Created from the ted-multi dataset
|
|
|
# Add processing steps here if you want another language
|
|
|
# Using Turkish as the target language

target_lang = "tr"  # change to your target language
|
|
|
|
|
from datasets import load_dataset |
|
# ted-multi is a multi-language translated dataset

# It fits our case: not too big, and curated
|
|
|
dataset = load_dataset("ted_multi") |
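
# Quick sanity check before extracting: each ted_multi row pairs a 'talk_name'
# with parallel 'language'/'translation' lists (assuming the default 'train' split)
dataset['train'][0]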
|
|
|
# There is no Turkish in europarl, so we had to choose a different dataset (ted-multi)
|
dataset.cleanup_cache_files()  # remove stale cache files left over from earlier runs
|
|
|
|
|
#chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]' # change to the ignored characters of your fine-tuned model
|
|
|
# We will use cahya/wav2vec2-base-turkish-artificial-cv

# Checked inside the model repository to find which characters were removed (no run.sh available)
|
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\‘\”\'\`…\’»«]'
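
# The map() call below needs extract_target_lang_entries, which is not defined
# anywhere above. A minimal sketch, assuming the standard ted_multi schema
# ('translations' holds parallel 'language' and 'translation' lists); the
# lowercasing/cleaning step is an assumption to match the fine-tuned model's vocab:
import re

def extract_target_lang_entries(batch):
    langs = batch['translations']['language']
    texts = batch['translations']['translation']
    if target_lang in langs:
        text = texts[langs.index(target_lang)]
        # strip the characters the fine-tuned model ignores, lowercase to match its vocab
        batch['text'] = re.sub(chars_to_ignore_regex, '', text).lower()
    else:
        batch['text'] = None  # no translation in the target language; filtered out below
    return batch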
|
|
|
# Drop the raw columns once the target-language 'text' field is extracted
cols_to_remove = ['translations', 'talk_name']

dataset = dataset.map(extract_target_lang_entries, remove_columns=cols_to_remove)
|
|
|
|
|
# Drop talks that have no translation in the target language
dataset_cleaned = dataset.filter(lambda x: x['text'] is not None)

dataset_cleaned
|
|
|
from huggingface_hub import notebook_login

# Authenticate so the processed dataset can be pushed to the Hugging Face Hub
notebook_login()
|
|
|
dataset_cleaned.push_to_hub(f"{target_lang}_ted_talk_translated") |
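
# Once pushed, anyone can reload it; 'your-username' below is a placeholder for
# your actual Hub namespace:
# reloaded = load_dataset(f"your-username/{target_lang}_ted_talk_translated")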
|
|