michaelnetbiz committed on
Commit
3293e45
1 Parent(s): a32a143

Reorganize scripts into python pkg

Browse files
.run/prep_data.run.xml CHANGED
@@ -31,7 +31,7 @@
31
  <option name="region" />
32
  <option name="useCurrentConnection" value="false" />
33
  </EXTENSION>
34
- <option name="SCRIPT_NAME" value="$PROJECT_DIR$/scripts/prep_push_to_hf.py" />
35
  <option name="PARAMETERS" value="" />
36
  <option name="SHOW_COMMAND_LINE" value="false" />
37
  <option name="EMULATE_TERMINAL" value="false" />
@@ -40,4 +40,4 @@
40
  <option name="INPUT_FILE" value="" />
41
  <method v="2" />
42
  </configuration>
43
- </component>
 
31
  <option name="region" />
32
  <option name="useCurrentConnection" value="false" />
33
  </EXTENSION>
34
+ <option name="SCRIPT_NAME" value="$PROJECT_DIR$/leviticus/prep_push_to_hf.py" />
35
  <option name="PARAMETERS" value="" />
36
  <option name="SHOW_COMMAND_LINE" value="false" />
37
  <option name="EMULATE_TERMINAL" value="false" />
 
40
  <option name="INPUT_FILE" value="" />
41
  <method v="2" />
42
  </configuration>
43
+ </component>
kendex/__init__.py ADDED
File without changes
leviticus/__init__.py → kendex/leviticus.py RENAMED
File without changes
{scripts → kendex}/prep_push_to_hf.py RENAMED
@@ -1,4 +1,3 @@
1
- #!/usr/bin/env python3
2
  from os.path import basename, join
3
  from pathlib import Path
4
 
@@ -31,27 +30,32 @@ def create_dataset(item):
31
  return dataset
32
 
33
 
34
# Module-level script body (pre-refactor): builds the Kendex dataset from local
# metadata/wavs and pushes it to the Hugging Face Hub. Being module-level, it
# runs on import; the refactor wraps this in main() under kendex/.

# Repository root: two levels up from this script's location.
repo_dir = Path(__file__).resolve().parent.parent
data_dir = join(repo_dir, "data")
kendex_dir = join(data_dir, "Kendex")
audio_dir = join(kendex_dir, "wavs")

# metadata.csv is pipe-delimited with no header row:
# column 0 = clip id (wav basename without extension), column 1 = transcript.
metadata = pd.read_csv(join(kendex_dir, "metadata.csv"), delimiter="|", header=None)
wavs = pd.Series([join(audio_dir, f"{f}.wav") for f in metadata[0]])
data = {
    "audio": wavs,
    "file": [basename(w) for w in wavs],
    "text": metadata[1],
    "norm": metadata[1].map(lambda x: normalize(x)),  # normalized transcript text
    "duration": [librosa.get_duration(path=w) for w in wavs],  # seconds per clip
}

# Deterministic shuffle (seed 666) so repeated runs produce the same split.
df = pd.DataFrame(data).sample(frac=1, random_state=666).reset_index(drop=True)

# 90% train / 10% test split.
train, test = np.split(df, [int(0.9 * len(df))])

train_dataset = create_dataset(train)
test_dataset = create_dataset(test)

full_dataset = DatasetDict({"train": train_dataset, "test": test_dataset})
full_dataset.push_to_hub("michaelnetbiz/Kendex")
 
 
 
 
 
 
1
  from os.path import basename, join
2
  from pathlib import Path
3
 
 
30
  return dataset
31
 
32
 
33
def main():
    """Assemble the Kendex speech dataset and push it to the Hugging Face Hub.

    Reads ``data/Kendex/metadata.csv`` (pipe-delimited, headerless), pairs each
    row with its wav file under ``data/Kendex/wavs``, shuffles deterministically,
    performs a 90/10 train/test split, and uploads the result as a
    ``DatasetDict`` to ``michaelnetbiz/Kendex``.
    """
    # Repository root: two levels up from this module (package dir sits at repo root).
    repo_dir = Path(__file__).resolve().parent.parent
    data_dir = join(repo_dir, "data")
    kendex_dir = join(data_dir, "Kendex")
    audio_dir = join(kendex_dir, "wavs")

    # Column 0 = clip id (wav basename without extension), column 1 = transcript.
    metadata = pd.read_csv(join(kendex_dir, "metadata.csv"), delimiter="|", header=None)
    wavs = pd.Series([join(audio_dir, f"{f}.wav") for f in metadata[0]])
    data = {
        "audio": wavs,
        "file": [basename(w) for w in wavs],
        "text": metadata[1],
        "norm": metadata[1].map(normalize),  # pass normalize directly; the lambda wrapper was redundant
        "duration": [librosa.get_duration(path=w) for w in wavs],  # seconds per clip
    }

    # Deterministic shuffle (seed 666) so repeated pushes produce the same split.
    df = pd.DataFrame(data).sample(frac=1, random_state=666).reset_index(drop=True)

    # 90% train / 10% test.
    train, test = np.split(df, [int(0.9 * len(df))])

    train_dataset = create_dataset(train)
    test_dataset = create_dataset(test)

    full_dataset = DatasetDict({"train": train_dataset, "test": test_dataset})
    full_dataset.push_to_hub("michaelnetbiz/Kendex")


if __name__ == "__main__":
    main()
kendex/pull_from_s3.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import tarfile
from pathlib import Path

import boto3

# Name of the dataset archive stored in the "kendex" S3 bucket.
KENDEX_TARBALL = "Kendex.tar.gz"


def main():
    """Fetch the Kendex tarball from S3 (if absent) and extract it under data/.

    Each step is skipped when its output already exists on disk, so the
    script is safe to re-run.
    """
    repo_dir = Path(__file__).resolve().parent.parent
    data_dir = os.path.join(repo_dir, "data")
    kendex_dir = os.path.join(data_dir, "Kendex")
    # Hoisted: the tarball path was previously rebuilt three times.
    tarball_path = os.path.join(data_dir, KENDEX_TARBALL)

    # Download only when the tarball is not already present locally.
    if not Path(tarball_path).exists():
        s3 = boto3.resource("s3")
        kendex = s3.Bucket("kendex")
        kendex.download_file(KENDEX_TARBALL, tarball_path)
        print(f"downloaded {KENDEX_TARBALL}")

    # Extract only when the target directory does not exist yet.
    if not Path(kendex_dir).exists():
        # Context manager guarantees the archive is closed even if extractall
        # raises (the previous open/close pair leaked on error).
        # NOTE(review): extractall is fine for this self-owned archive; for
        # untrusted input prefer the filter="data" argument (Python 3.12+).
        with tarfile.open(tarball_path) as tarball:
            tarball.extractall(data_dir)
        print(f"extracted {KENDEX_TARBALL}")


if __name__ == "__main__":
    main()
scripts/pull_from_s3.py DELETED
@@ -1,24 +0,0 @@
1
#!/usr/bin/env python3

import os
import boto3
import tarfile
from pathlib import Path

# Name of the dataset archive stored in the "kendex" S3 bucket.
KENDEX_TARBALL = "Kendex.tar.gz"

# Module-level script body (pre-refactor): downloads and unpacks the tarball.
# Being module-level, it runs on import; the refactor wraps this in main().
repo_dir = Path(__file__).resolve().parent.parent
data_dir = os.path.join(repo_dir, "data")
kendex_dir = os.path.join(data_dir, "Kendex")

# Skip the download when the tarball is already present locally.
if not Path(os.path.join(data_dir, KENDEX_TARBALL)).exists():
    s3 = boto3.resource("s3")
    kendex = s3.Bucket("kendex")
    kendex.download_file(KENDEX_TARBALL, os.path.join(data_dir, KENDEX_TARBALL))
    print(f"downloaded {KENDEX_TARBALL}")

# Skip extraction when the target directory already exists.
if not Path(kendex_dir).exists():
    file = tarfile.open(os.path.join(data_dir, KENDEX_TARBALL))
    file.extractall(data_dir)
    file.close()
    print(f"extracted {KENDEX_TARBALL}")