polinaeterna HF staff committed on
Commit
f6a522a
1 Parent(s): dace825

Create reorganize_and_archive.py

Browse files
Files changed (1) hide show
  1. reorganize_and_archive.py +122 -0
reorganize_and_archive.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import os
3
+ import logging
4
+ import pandas
5
+ from tqdm import tqdm
6
+ import tarfile
7
+ import logging
8
+ import os
9
+ from pathlib import Path
10
+ import csv
11
+ from functools import partial
12
+ from multiprocessing import Pool
13
+
14
+
15
# Timestamped log lines at INFO level so per-language/per-split progress is visible.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)


# Maximum number of clip files packed into a single output tar shard.
files_per_archive = 40_000
19
+
20
+
21
def make_archive(archive_index_with_files, output_dir, lang, split):
    """Bundle one group of clip files into a single uncompressed tar.

    The archive is written to ``<output_dir>/<lang>_<split>_<index>.tar`` and
    every member is stored under a ``<lang>_<split>_<index>/`` prefix, so
    extracting several shards side by side never collides.

    Args:
        archive_index_with_files: ``(index, paths)`` pair — the shard number
            and the list of file paths to pack (shape chosen so the function
            can be fed directly from ``enumerate``-style groups via ``Pool.map``).
        output_dir: directory the ``.tar`` file is written into.
        lang: language code used in the archive/member-directory name.
        split: dataset split name used in the archive/member-directory name.
    """
    index, file_paths = archive_index_with_files
    member_dir = f"{lang}_{split}_{index}"
    tar_path = os.path.join(output_dir, f"{member_dir}.tar")
    with tarfile.open(tar_path, "w") as tar:
        for path in file_paths:
            base_name = os.path.basename(path)
            tar.add(path, arcname=os.path.join(member_dir, base_name))
29
+
30
+
31
def extract_archive(archive_path, target_dir):
    """Extract a tar archive into ``target_dir``.

    Opens with transparent compression detection (``"r:*"``), so plain,
    gzip-, bz2- and xz-compressed tars are all accepted.  The previous
    hard-coded ``"r:gz"`` raised ``tarfile.ReadError`` on anything that was
    not gzip-compressed, even though the function name promises generic
    extraction.

    NOTE(review): ``extractall`` without a ``filter=`` argument trusts the
    archive contents (path traversal risk on malicious tars).  Fine for the
    self-produced Common Voice dumps this script handles, but do not point
    it at untrusted archives — confirm before reusing elsewhere.

    Args:
        archive_path: path to the ``.tar``/``.tar.gz``/... archive.
        target_dir: directory the members are extracted into.
    """
    with tarfile.open(archive_path, "r:*") as f:
        f.extractall(path=target_dir)
34
+
35
+
36
def main():
    """Reorganize Common Voice 11 language dumps into per-split tar shards.

    For every language in ``langs``:
      1. optionally extract the original ``<lang>.tar.gz`` dump,
      2. for each split (test/dev/train/other/invalidated): copy the split's
         TSV metadata into the repo layout and pack the clips it references
         into uncompressed tars of at most ``files_per_archive`` members,
         taring shards in parallel with a process pool when more than one
         shard is needed.

    Paths are hard-coded to the author's VM layout; outputs land under
    ``repos/common_voice_11_0/``.
    """
    import shutil  # stdlib; imported locally so the file header stays untouched

    # Uncomment exactly ONE batch of languages per run:
    # langs = ["ab", "ar", "as", "ast"]
    # langs = ["az"]
    # langs = ["ba"]
    # langs = ["bas", "be", "bg", "bn", "br"]
    # langs = ["ca", "ckb", "en", "eo", "eu", "fr"]
    # langs = ["hy-AM"]
    # langs = ["rw", "th", "yue", "zh-CN"]
    # langs = ["zh-HK", "zh-TW", "cv"]
    # langs = ["cs", "da", "dv", "nl", "myv", "et", "fi", "fy-NL", "gl", ]
    # langs = ["ka", "el", "gn", "cnh", "ha", "mrj", "hi", "ig", "id", "ia", "ga-IE", "it"]
    # langs = ["ja", "kab", ]
    # langs = ["kk", "kmr", "ky", "lg", "fa", "es"]
    # langs = ["cy", "vi", "uz", "ug"]
    # langs = ["ur", "uk", "tw", "tr", "tok", "ti"] +\
    #         ["tig", "tt", "ta", "nan-tw", "sv-SE", "sw", "hsb", "sl", "sk", "sr"] + \
    #         ["sc", "skr", "sat", "sah", "ru", ]
    # langs = ["rm-sursilv", "ro", "pa-IN", "pt"] + \
    #         ["pl", "or", "ne-NP", "mn", "mdf", "mhr", "mr", "mt", "ml", "mk", "lt", "nn-NO", "rm-vallader", "lv"]
    # langs = ['hu', 'vot']
    # BUGFIX: `langs` was only ever assigned in the commented lines above, so a
    # run without uncommenting one crashed with NameError.  An explicit empty
    # default makes an unconfigured run a clean no-op instead.
    langs = []

    for lang in tqdm(langs, desc="languages"):
        extract = True
        logging.info(f"Language: {lang.upper()}, files per tar: {files_per_archive}")
        orig_archive_path = f"/home/polina_huggingface_co/data/cv11/{lang}.tar.gz"
        if extract:
            logging.info("Extracting original archive...")
            extract_archive(orig_archive_path, target_dir="/home/polina_huggingface_co/data/cv11/")
            logging.info("Extracted.")

        clip_path = f"/home/polina_huggingface_co/data/cv11/cv-corpus-11.0-2022-09-21/{lang}/clips"

        splits = ("test", "dev", "train", "other", "invalidated")

        for split in splits:
            meta_path = f"/home/polina_huggingface_co/data/cv11/cv-corpus-11.0-2022-09-21/{lang}/{split}.tsv"
            new_meta_dir = f"repos/common_voice_11_0/transcript/{lang}/"
            Path(new_meta_dir).mkdir(parents=True, exist_ok=True)

            # QUOTE_NONE: transcripts may contain stray quote characters that
            # would otherwise break TSV parsing.
            data = pandas.read_csv(meta_path, sep='\t', quoting=csv.QUOTE_NONE, low_memory=False)
            # BUGFIX: replaces `os.system(f"cp {meta_path} {new_meta_dir}")` —
            # no shell involved, works on paths with spaces, and raises on
            # failure instead of silently ignoring a non-zero exit status.
            shutil.copy(meta_path, new_meta_dir)

            all_files = [os.path.join(clip_path, filename) for filename in list(data["path"])]

            num_files = len(all_files)
            if num_files == 0:
                continue

            logging.info(f"split: {split.upper()}, num_files: {num_files}")

            new_clip_path = f"repos/common_voice_11_0/audio/{lang}/{split}"
            Path(new_clip_path).mkdir(parents=True, exist_ok=True)

            # Chunk the file list into (shard_index, paths) groups of at most
            # `files_per_archive` clips each.
            file_groups = [
                (arch_index_in_dir, all_files[start_index:start_index + files_per_archive])
                for arch_index_in_dir, start_index in enumerate(range(0, num_files, files_per_archive))
            ]

            n_file_groups = len(file_groups)
            num_procs = max(1, min(n_file_groups, 26))
            logging.info(f"N groups: {n_file_groups}, num procs: {num_procs}")

            if n_file_groups > 1:
                # BUGFIX: the pool was never closed/joined, leaking worker
                # processes per split; the context manager terminates workers
                # even if an archive job raises.
                with Pool(num_procs) as pool:
                    pool.map(
                        partial(
                            make_archive,
                            output_dir=new_clip_path,
                            lang=lang,
                            split=split,
                        ),
                        tqdm(file_groups, desc=f"Taring {split} subset...", position=0),
                    )
            else:
                make_archive(
                    file_groups[0],
                    output_dir=new_clip_path,
                    lang=lang,
                    split=split,
                )
119
+
120
+
121
# Script entry point: run the reorganize-and-archive pipeline only when
# executed directly, not when imported.
if __name__ == "__main__":
    main()