jg583 committed on
Commit
e4197d8
1 Parent(s): 9538a6c
Files changed (2) hide show
  1. NSynth.py +108 -0
  2. README.md +3 -0
NSynth.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ import os
4
+ import wave
5
+
6
+ _CITATION = """\
7
+ @misc{nsynth2017,
8
+ Author = {Jesse Engel and Cinjon Resnick and Adam Roberts and
9
+ Sander Dieleman and Douglas Eck and Karen Simonyan and
10
+ Mohammad Norouzi},
11
+ Title = {Neural Audio Synthesis of Musical Notes with WaveNet Autoencoders},
12
+ Year = {2017},
13
+ Eprint = {arXiv:1704.01279},
14
+ }
15
+ """
16
+
17
+ _DESCRIPTION = """\
18
+ The NSynth dataset is an audio dataset containing over 300,000 musical notes across over 1000 commercially-sampled instruments, distinguished by pitch, timbre, and envelope. Each recording was made by playing and holding a musical note for three seconds and letting it decay for one second. The collection of four-second recordings ranges over every pitch on a standard MIDI piano (or as many as possible for the given instrument), played at five different velocities.
19
+
20
+ This dataset was created as an attempt to establish a high-quality entry point into audio machine learning, in response to the surge of breakthroughs in generative modeling of images due to the abundance of approachable image datasets (MNIST, CIFAR, ImageNet). NSynth is meant to be both a benchmark for audio ML and a foundation to be expanded on with future datasets.
21
+
22
+ """
23
+
24
+ _HOMEPAGE = "https://magenta.tensorflow.org/datasets/nsynth#note-qualities"
25
+
26
+ _LICENSE = "Creative Commons Attribution 4.0 International (CC BY 4.0)"
27
+
28
+ class NSynth(datasets.GeneratorBasedBuilder):
29
+ def _info(self):
30
+ features = datasets.Features({
31
+ "note": datasets.Value("int64"),
32
+ "note_str": datasets.Value("string"),
33
+ "instrument": datasets.Value("int64"),
34
+ "instrument_str": datasets.Value("string"),
35
+ "pitch": datasets.Value("int64"),
36
+ "velocity": datasets.Value("int64"),
37
+ "sample_rate": datasets.Value("int64"),
38
+ "qualities": datasets.features.Sequence(datasets.Value("int64")),
39
+ "qualities_str": datasets.features.Sequence(datasets.Value("string")),
40
+ "instrument_family": datasets.Value("int64"),
41
+ "instrument_family_str": datasets.Value("string"),
42
+ "instrument_source": datasets.Value("int64"),
43
+ "instrument_source_str": datasets.Value("string"),
44
+ "audio": datasets.features.Audio(mono = False)
45
+ })
46
+ return datasets.DatasetInfo(
47
+ description=_DESCRIPTION,
48
+ features=features,
49
+ homepage=_HOMEPAGE,
50
+ license=_LICENSE,
51
+ citation=_CITATION,
52
+ )
53
+ def _split_generators(self, dl_manager):
54
+ dl_paths = dl_manager.download_and_extract({
55
+ 'train': 'http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-train.jsonwav.tar.gz',
56
+ 'valid': 'http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-valid.jsonwav.tar.gz',
57
+ 'test': 'http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-test.jsonwav.tar.gz',
58
+ })
59
+ return[
60
+ datasets.SplitGenerator(
61
+ name=datasets.Split.TRAIN,
62
+ gen_kwargs={
63
+ "filepath": dl_paths['train']
64
+ },
65
+ ),
66
+ datasets.SplitGenerator(
67
+ name=datasets.Split.VALIDATION,
68
+ gen_kwargs={
69
+ "filepath": dl_paths['valid']
70
+ },
71
+ ),
72
+ datasets.SplitGenerator(
73
+ name=datasets.Split.TEST,
74
+ gen_kwargs={
75
+ "filepath": dl_paths['test']
76
+ },
77
+ ),
78
+ ]
79
+ def _generate_examples(self, filepath):
80
+ with open(filepath) as f:
81
+ dir_list = os.listdir(filepath)
82
+ if "examples.json" in dir_list:
83
+ examples = json.load(filepath.join("/examples.json"))
84
+ if "audio" in dir_list:
85
+ wav_dict = {}
86
+ audio_path = filepath.join("/audio")
87
+ for filename in audio_path:
88
+ with wave.open(filename, 'rb') as wav_file:
89
+ key = wav_file.replace('.wav', '')
90
+ wav_dict[key] = wav_file.readframes(wav_file.getnframes())
91
+ with open(json_data) as examples:
92
+ for key in examples:
93
+ yield key, {
94
+ "note": key["note"],
95
+ "note_str": key["note_str"],
96
+ "instrument": key["instrument"],
97
+ "instrument_str": key["instrument_str"],
98
+ "pitch": key["pitch"],
99
+ "velocity": key["velocity"],
100
+ "sample_rate": key["sample_rate"],
101
+ "qualities": key["qualities"],
102
+ "qualities_str": key["qualities_str"],
103
+ "instrument_family": key["instrument_family"],
104
+ "instrument_family_str": key["instrument_family_str"],
105
+ "instrument_source": key["instrument_source"],
106
+ "instrument_source_str": key["instrument_source_str"],
107
+ "audio": wav_dict[key]
108
+ }
README.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ---
2
+ license: cc-by-4.0
3
+ ---