asahi417 committed
Commit befd6a5
1 Parent(s): 29d6c07

test streaming

Files changed (5)
  1. .gitignore +2 -1
  2. format_text.py +10 -0
  3. main.sh +4 -1
  4. push_s2s_translation.py +33 -33
  5. push_s2t_translation.py +114 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
 .idea
 build
-preprocess
+preprocess
+download
format_text.py ADDED
@@ -0,0 +1,10 @@
+import csv
+import json
+from glob import glob
+import pandas as pd
+
+df = pd.concat([pd.read_csv(i, quoting=csv.QUOTE_NONE, encoding='utf-8', sep='\t', header=None, on_bad_lines='skip') for i in glob('seamless.dataset.metadata.public.jpn.batch_*.tsv')])
+line_no = [i.split(" ")[3] for i in df[0]]
+text = df[1].values.tolist()
+with open("text.enA-jpn.json", "w") as f:
+    json.dump({l: t for l, t in zip(line_no, text)}, f)
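For context, a minimal sketch of the mapping this new script builds. The sample row below is hypothetical: the exact field layout of the wet_lines output is an assumption here, and the script only relies on the fourth space-separated token of column 0 (used as the key) and the sentence in column 1 (used as the value).

import csv
import io
import pandas as pd

# hypothetical single row in the assumed wet_lines output layout:
# column 0 = space-separated WET metadata (4th token used as the key), column 1 = the crawled sentence
row = "crawl-data/CC-MAIN-.../file.warc.wet.gz 0 0 901\tこんにちは"
df = pd.read_csv(io.StringIO(row), quoting=csv.QUOTE_NONE, sep="\t", header=None)
print({i.split(" ")[3]: t for i, t in zip(df[0], df[1])})  # -> {'901': 'こんにちは'}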
main.sh CHANGED
@@ -130,7 +130,10 @@ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_8.tsv
 cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_9.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_9.tsv
 cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_10.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_10.tsv
 cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_11.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_11.tsv
-
+cp ../format_text.py ./
+python format_text.py
+mv text.enA-jpn.json ../
+cd ../
 
 ########
 # NLLB #
push_s2s_translation.py CHANGED
@@ -21,7 +21,7 @@ line_no_start = int(os.getenv("LINE_NO_START", 0))
 line_no_end = int(os.getenv("LINE_NO_END", 10000))
 dataset_id = int(os.getenv("DATASET_ID", 0))
 hf_org = "kotoba-tech"
-hf_dataset = f"seamless-align-{direction}-{dataset_id}"
+hf_dataset = f"seamless-align-{direction}"
 
 
 def loader(feature: str) -> Dict:
@@ -50,37 +50,37 @@ data_dict.update(
 audio_dataset = Dataset.from_dict(data_dict)
 audio_dataset = audio_dataset.cast_column(f"{sides_rev[1]}.audio", Audio())
 audio_dataset = audio_dataset.cast_column(f"{sides_rev[2]}.audio", Audio())
-#
-# # remove instances with broken audio files
-# broken_files = []
-# for i in tqdm(range(len(audio_dataset))):
-#     try:
-#         a = audio_dataset[i]
-#         flag = True
-#         for side_id in sides_rev.keys():
-#             start = a[f"{sides_rev[side_id]}.duration_start"]
-#             end = a[f"{sides_rev[side_id]}.duration_end"]
-#             array = a[f"{sides_rev[side_id]}.audio"]["array"]
-#             flag = 0 < start < end < len(array)
-#             if not flag:
-#                 broken_files.append(i)
-#     except LibsndfileError:
-#         broken_files.append(i)
-#         continue
-# print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
-# if len(broken_files) > 0:
-#     print(f"found {len(broken_files)} broken files:")
-#     flag = input("delete the broken files? (y/n): ")
-#     if flag == "y":
-#         # remove broken files
-#         for i in broken_files:
-#             if os.path.exists(files[file_ids[i]]):
-#                 os.remove(files[file_ids[i]])
-#             for side_id in sides_rev.keys():
-#                 if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
-#                     os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
-# valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken_files]
-# audio_dataset_valid = audio_dataset.select(valid_data_id)
+
+# remove instances with broken audio files
+broken_files = []
+for i in tqdm(range(len(audio_dataset))):
+    try:
+        a = audio_dataset[i]
+        flag = True
+        for side_id in sides_rev.keys():
+            start = a[f"{sides_rev[side_id]}.duration_start"]
+            end = a[f"{sides_rev[side_id]}.duration_end"]
+            array = a[f"{sides_rev[side_id]}.audio"]["array"]
+            flag = 0 < start < end < len(array)
+            if not flag:
+                broken_files.append(i)
+    except LibsndfileError:
+        broken_files.append(i)
+        continue
+print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
+if len(broken_files) > 0:
+    print(f"found {len(broken_files)} broken files:")
+    flag = input("delete the broken files? (y/n): ")
+    if flag == "y":
+        # remove broken files
+        for i in broken_files:
+            if os.path.exists(files[file_ids[i]]):
+                os.remove(files[file_ids[i]])
+            for side_id in sides_rev.keys():
+                if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
+                    os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
+valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken_files]
+audio_dataset_valid = audio_dataset.select(valid_data_id)
 
 
 # trim the audio according to the duration
@@ -108,7 +108,7 @@ dataset_to_push = DatasetDict({"train": audio_dataset_valid})
 repo_name = f"{hf_org}/{hf_dataset}"
 while True:
     try:
-        dataset_to_push.push_to_hub(repo_name)
+        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
         break
     except Exception:
         print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
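The push now targets a single repository per direction and stores each DATASET_ID batch as its own config ("subset_{dataset_id}") instead of creating one repository per batch. A minimal sketch of reading one such subset back with the datasets library; the repository and subset names below are assumed examples following the f"{hf_org}/seamless-align-{direction}" pattern above, and streaming=True matches the streaming test this commit refers to.

from datasets import load_dataset

# assumed repo/config names, mirroring hf_org, hf_dataset and config_name above
repo_name = "kotoba-tech/seamless-align-enA-jpn"
subset = "subset_0"
ds = load_dataset(repo_name, subset, split="train", streaming=True)
print(next(iter(ds)))  # first example; columns are decoded lazily under streaming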
push_s2t_translation.py ADDED
@@ -0,0 +1,114 @@
+import json
+import os
+import time
+from os.path import join as p_join
+from tqdm import tqdm
+from typing import Dict
+from glob import glob
+
+from soundfile import LibsndfileError
+from datasets import Dataset, Audio, DatasetDict
+
+# dataset config
+direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
+direction_text = os.getenv("DIRECTION_TEXT", "jpn")
+direction = f"{direction_speech}-{direction_text}"
+with open(f"text.{direction}.json") as f:
+    line2text = json.load(f)
+cache_dir_audio = p_join("download", "audio", direction)
+cache_dir_feature = p_join("download", "feature", direction)
+os.makedirs(cache_dir_audio, exist_ok=True)
+os.makedirs(cache_dir_feature, exist_ok=True)
+line_no_start = int(os.getenv("LINE_NO_START", 0))
+line_no_end = int(os.getenv("LINE_NO_END", 10000))
+dataset_id = int(os.getenv("DATASET_ID", 0))
+hf_org = "kotoba-tech"
+hf_dataset = f"seamless-align-{direction}"
+
+
+def loader(feature: str) -> Dict:
+    with open(feature) as f:
+        return json.load(f)
+
+
+# create a dataset instance
+
+files = {
+    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
+}
+
+
+def delete_audio(target_audio_file):
+    if os.path.exists(target_audio_file):
+        os.remove(target_audio_file)
+    line_no = os.path.basename(target_audio_file).split(".")[0]
+    try:
+        feature_file = files[int(line_no)]
+        if os.path.exists(feature_file):
+            os.remove(feature_file)
+    except Exception as e:
+        print(e)
+
+
+# remove broken audio files
+features = []
+audio_loader = Audio()
+for line_no in tqdm(list(range(line_no_start, line_no_end))):
+    if line_no not in files:
+        continue
+    i = loader(files[line_no])
+    i[f"{direction_text}.text"] = line2text[str(line_no)]
+    audio_file = i[f"{direction_speech}.path"]
+    start, end = i[f"{direction_speech}.duration_start"], i[f"{direction_speech}.duration_end"]
+    if os.path.exists(audio_file):
+        try:
+            wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
+            if start < end < len(wav["array"]):
+                features.append(i)
+            else:
+                delete_audio(audio_file)
+        except Exception as e:
+            print(e)
+            delete_audio(audio_file)
+
+
+print(f"features (filtered): {len(features)}")
+data_dict = {f"{direction_speech}.audio": [i.pop(f"{direction_speech}.path") for i in features]}
+keys = features[0].keys()
+data_dict.update({k: [i[k] for i in features] for k in keys})
+audio_dataset = Dataset.from_dict(data_dict)
+audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio())
+
+
+# trim the audio according to the duration
+def clip_audio(batch):
+    start = batch[f"{direction_speech}.duration_start"]
+    end = batch[f"{direction_speech}.duration_end"]
+    audio = batch[f"{direction_speech}.audio"]
+    batch[f"{direction_speech}.audio"] = [
+        {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
+        for a, s, e in zip(audio, start, end)
+    ]
+    return batch
+
+
+audio_dataset_valid = audio_dataset.map(
+    function=clip_audio,
+    batched=True,
+    batch_size=128,
+    num_proc=1,
+    desc="clipping audio based on the duration:"
+)
+
+dataset_to_push = DatasetDict({"train": audio_dataset_valid})
+repo_name = f"{hf_org}/{hf_dataset}"
+while True:
+    try:
+        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
+        break
+    except Exception:
+        print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
+        time.sleep(60)
+os.makedirs("log", exist_ok=True)
+with open(f"log/pushed.line_no.{dataset_id}.json", "w") as f:
+    json.dump(data_dict["line_no"], f)
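To make the behaviour of the clip_audio step concrete, a self-contained sketch on a dummy in-memory dataset: it assumes, as the script above does, that duration_start/duration_end are sample indices into the decoded waveform (the dummy values are made up).

from datasets import Dataset

direction_speech = "enA"
dummy = Dataset.from_dict({
    f"{direction_speech}.audio": [{"array": [0.0] * 16000, "sampling_rate": 16000}],
    f"{direction_speech}.duration_start": [4000],
    f"{direction_speech}.duration_end": [12000],
})

def clip_audio(batch):
    # same trimming as in push_s2t_translation.py: keep array[start:end] per example
    start = batch[f"{direction_speech}.duration_start"]
    end = batch[f"{direction_speech}.duration_end"]
    audio = batch[f"{direction_speech}.audio"]
    batch[f"{direction_speech}.audio"] = [
        {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
        for a, s, e in zip(audio, start, end)
    ]
    return batch

clipped = dummy.map(clip_audio, batched=True, batch_size=128)
print(len(clipped[0][f"{direction_speech}.audio"]["array"]))  # 8000 samples kept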