asahi417 commited on
Commit
0021056
1 Parent(s): 28ad598
Files changed (3) hide show
  1. download_audio.py +38 -3
  2. requirements.txt +2 -1
  3. util.py +14 -19
download_audio.py CHANGED
@@ -1,8 +1,43 @@
 
 
 
 
1
  from util import wget
2
 
 
3
  url_metadata_s2s = "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
4
  url_metadata_s2t = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
5
- cache_dir = "./download"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- wget(url_metadata_s2s, cache_dir=cache_dir)
8
- wget(url_metadata_s2t, cache_dir=cache_dir)
 
1
+ import os
2
+ from os.path import join as p_join
3
+ import pandas as pd
4
+ from tqdm import tqdm
5
  from util import wget
6
 
7
+
8
  url_metadata_s2s = "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
9
  url_metadata_s2t = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
10
+ cache_dir_root = "./download"
11
+
12
+
13
def get_metadata(url: str):
    """Download (if not cached) and load one seamless-align metadata table.

    Parameters
    ----------
    url: location of the gzipped TSV metadata file.

    Returns
    -------
    pandas.DataFrame with columns
    ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"].

    Raises
    ------
    RuntimeError: if the download fails.
    """
    cache_dir = p_join(cache_dir_root, "meta")
    # wget strips the .gz on extraction, so the cached file has no suffix.
    filename = os.path.basename(url).replace(".gz", "")
    # BUG FIX: the existence check must look inside cache_dir; testing the
    # bare basename checked the current working directory instead, so the
    # metadata was re-downloaded on every run.
    if not os.path.exists(p_join(cache_dir, filename)):
        # Fail loudly rather than try to parse a missing/partial file.
        # (Was `assert wget(...)`, which `python -O` silently strips.)
        if not wget(url, cache_dir=cache_dir):
            raise RuntimeError(f"failed to download metadata from {url}")
    df = pd.read_csv(p_join(cache_dir, filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
    df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
    print(f"load metadata: (unknown), ({len(df)} rows)")
    return df
22
+
23
+
24
def get_audio(url: str, filename: str):
    """Fetch one audio file into the audio cache.

    Parameters
    ----------
    url: remote location of the audio file.
    filename: basename to store the file under inside the cache.

    Returns
    -------
    True when the file is available locally (already cached, or downloaded
    successfully); False when the download failed.
    """
    cache_dir = p_join(cache_dir_root, "audio")
    # BUG FIX: the original returned False for already-cached files, which
    # made process_dataset count every cache hit as a missing file. A cache
    # hit is a success, so report True.
    if os.path.exists(p_join(cache_dir, filename)):
        return True
    return wget(url, filename=filename, cache_dir=cache_dir)
29
+
30
+
31
def process_dataset(url_metadata):
    """Download every audio file referenced by the metadata at *url_metadata*
    and print how many files could not be fetched."""
    meta = get_metadata(url_metadata)
    num_missing_files = 0
    for _, record in tqdm(meta.iterrows(), total=len(meta)):
        # Cache name encodes direction and side so the two corpora can share
        # one directory without collisions.
        audio_name = f"{record['direction']}.{record['side']}.{os.path.basename(record['url'])}"
        if not get_audio(record['url'], audio_name):
            num_missing_files += 1
    print(f"missing files: {num_missing_files}/{len(meta)}")
38
+
39
+
40
if __name__ == '__main__':
    # Speech-to-speech metadata first, then speech-to-text.
    for metadata_url in (url_metadata_s2s, url_metadata_s2t):
        process_dataset(metadata_url)
43
 
 
 
requirements.txt CHANGED
@@ -1 +1,2 @@
1
- requests
 
 
1
+ requests
2
+ pandas
3
+ tqdm
util.py CHANGED
@@ -1,24 +1,26 @@
1
  import os
2
- from os.path import join as p_join
3
- from typing import Optional
4
  import tarfile
5
  import zipfile
6
  import gzip
 
7
  import requests
 
 
 
8
 
9
 
10
- __all__ = 'wget'
11
-
12
-
13
- def wget(url: str, cache_dir: str, filename: Optional[str] = None, uncompress_file: bool = True):
14
  os.makedirs(cache_dir, exist_ok=True)
15
  filename = os.path.basename(url) if not filename else filename
16
  output_file = p_join(cache_dir, filename)
17
- with open(output_file, "wb") as f:
18
- r = requests.get(url)
19
- f.write(r.content)
20
- if not uncompress_file:
21
- return output_file
 
 
 
22
 
23
  if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
24
  if output_file.endswith('.tar'):
@@ -28,20 +30,13 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None, uncompress_fi
28
  tar.extractall(cache_dir)
29
  tar.close()
30
  os.remove(output_file)
31
- return output_file.replace('.tar.gz', '').replace('.tgz', '').replace('.tar', '')
32
  elif output_file.endswith('.gz'):
33
  with gzip.open(output_file, 'rb') as f:
34
  with open(output_file.replace('.gz', ''), 'wb') as f_write:
35
  f_write.write(f.read())
36
  os.remove(output_file)
37
- return output_file.replace('.gz', '')
38
  elif output_file.endswith('.zip'):
39
  with zipfile.ZipFile(output_file, 'r') as zip_ref:
40
  zip_ref.extractall(cache_dir)
41
  os.remove(output_file)
42
- return output_file.replace('.zip', '')
43
- return output_file
44
-
45
-
46
- if __name__ == '__main__':
47
- wget()
 
1
  import os
 
 
2
  import tarfile
3
  import zipfile
4
  import gzip
5
+ import traceback
6
  import requests
7
+ from os.path import join as p_join
8
+ from typing import Optional
9
+ from urllib3.connection import ConnectionError
10
 
11
 
12
def wget(url: str, cache_dir: str, filename: Optional[str] = None):
    """Download *url* into *cache_dir* and unpack any archive in place.

    Parameters
    ----------
    url: file to fetch.
    cache_dir: target directory; created if missing. Archives (.tar.gz/.tgz/
        .tar/.gz/.zip) are extracted here and the archive itself removed.
    filename: optional basename override; defaults to the URL basename.

    Returns
    -------
    True on success, False when the download failed (any partial file is
    removed before returning).
    """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url) if not filename else filename
    output_file = p_join(cache_dir, filename)
    try:
        # Stream to disk so large audio files are not held in memory, and
        # treat HTTP error statuses as failures instead of silently saving
        # an error page and reporting success.
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(output_file, "wb") as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)
    except KeyboardInterrupt:
        # Remove the partial file, then let the interrupt propagate.
        if os.path.exists(output_file):
            os.remove(output_file)
        raise
    except requests.exceptions.RequestException:
        # BUG FIX: `except ConnectionError or KeyboardInterrupt:` only ever
        # caught the first class (`or` is evaluated, not a union) — and that
        # class came from urllib3, while requests raises its own
        # ConnectionError, so failures escaped the handler entirely.
        # RequestException covers connection errors, timeouts and bad statuses.
        traceback.print_exc()
        if os.path.exists(output_file):
            os.remove(output_file)
        return False

    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        # tarfile.open in default "r" mode auto-detects the compression,
        # covering both plain .tar and gzipped variants.
        with tarfile.open(output_file) as tar:
            tar.extractall(cache_dir)
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(output_file)
    return True