gorkemgoknar committed on
Commit fe1ed71
1 Parent(s): 579e9ab

Create README.md

Files changed (1): README.md +36 -0
README.md ADDED
## Created from the ted_multi dataset

The processing steps are included below in case you want to build the same dataset for another language.

# using Turkish as target
target_lang = "tr"  # change to your target lang

from datasets import load_dataset

# ted_multi is a multilingual, translated dataset
# it fits our case: curated and not too big
dataset = load_dataset("ted_multi")

# there is no Turkish in Europarl, so another dataset had to be chosen; ted_multi covers it
dataset.cleanup_cache_files()

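Before mapping, it is worth a quick look at what each record contains; the field names below (`translations` with parallel `language` / `translation` lists, plus `talk_name`, and a `train` split) follow the published ted_multi schema:

```python
# quick sanity check: available splits and the structure of one record
print(dataset)
sample = dataset["train"][0]
print(sample.keys())                            # expected: 'translations' and 'talk_name'
print(sample["translations"]["language"][:5])   # first few language codes available for this talk
```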

# chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # change to the set of characters ignored by your fine-tuned model

# we will use cahya/wav2vec2-base-turkish-artificial-cv
# checked inside the model repository to find which characters are removed (there is no run.sh)
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\‘\”\'\`…\’»«]'

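The `extract_target_lang_entries` function used by the `map` call below is not defined anywhere in this file. The following is only a sketch of what it might look like, assuming the ted_multi schema above (parallel `language` / `translation` lists) and reusing `chars_to_ignore_regex` to strip the ignored characters:

```python
import re

def extract_target_lang_entries(example):
    # each talk stores its translations as parallel lists of language codes and texts;
    # keep the target-language text with the ignored characters stripped, or None if missing
    languages = example["translations"]["language"]
    translations = example["translations"]["translation"]
    if target_lang in languages:
        text = translations[languages.index(target_lang)]
        example["text"] = re.sub(chars_to_ignore_regex, "", text)
    else:
        example["text"] = None
    return example
```

Talks with no translation in the target language end up with `text = None`, which is exactly what the `filter` step further down removes.
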
cols_to_remove = ['translations', 'talk_name']
dataset = dataset.map(extract_target_lang_entries, remove_columns=cols_to_remove)

# keep only the talks that actually have text in the target language
dataset_cleaned = dataset.filter(lambda x: x['text'] is not None)
dataset_cleaned

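Not every talk has a translation in every language, so it can be useful to see how much data survives the filter; a quick comparison, assuming the usual `train` split name:

```python
# number of talks before and after dropping entries without a target-language translation
print(dataset["train"].num_rows, "->", dataset_cleaned["train"].num_rows)
```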

# log in with a Hugging Face token that has write access, then push the cleaned dataset
from huggingface_hub import notebook_login

notebook_login()

dataset_cleaned.push_to_hub(f"{target_lang}_ted_talk_translated")
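`push_to_hub` creates the dataset repository under the account you logged in with, so loading it back later looks roughly like this (`your-username` is a placeholder, not an existing repo):

```python
from datasets import load_dataset

# hypothetical repo id: <your HF username>/tr_ted_talk_translated
ted_tr = load_dataset("your-username/tr_ted_talk_translated")
print(ted_tr)
```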