File size: 4,756 Bytes
d1d6816
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
import joblib
import numpy as np
from generate_synthetic_data_online import generate_synth_dataset_log_512, generate_synth_dataset_log_muted_512
from tools import show_spc, spc_to_VAE_input, VAE_out_put_to_spc, np_log10
import torch.utils.data as data


class Data_cache():
    """In-memory cache of synthetic spectrogram samples plus external sources.

    Parameters
    ----------
    synthetic_data : np.ndarray
        Synthetically generated samples; cast to float32.
    external_sources : np.ndarray
        Samples loaded from external datasets; cast to float32.
    """

    def __init__(self, synthetic_data, external_sources):
        # Number of synthetic samples; refresh() regenerates this many.
        self.n_synthetic = np.shape(synthetic_data)[0]
        self.synthetic_data = synthetic_data.astype(np.float32)
        self.external_sources = external_sources.astype(np.float32)
        # Guard against log10(0) in get_data_loader(new_way=True).
        self.epsilon = 1e-20

    def get_all_data(self):
        """Return synthetic and external samples stacked into one array."""
        return np.vstack([self.synthetic_data, self.external_sources])

    def refresh(self):
        """Regenerate the synthetic portion of the cache (muted variant)."""
        self.synthetic_data = generate_synth_dataset(self.n_synthetic, mute=True)

    def get_data_loader(self, shuffle=True, BATCH_SIZE=8, new_way=False):
        """Build a torch DataLoader over all cached samples.

        Parameters
        ----------
        shuffle : bool
            Whether the DataLoader reshuffles samples each epoch.
        BATCH_SIZE : int
            Batch size of the returned DataLoader.
        new_way : bool
            If True, convert each sample back to a spectrogram and take
            its log10 (epsilon-guarded); otherwise feed samples as-is.

        Returns
        -------
        torch.utils.data.DataLoader
        """
        our_data = []
        # Iterate samples directly instead of indexing by position.
        for sample in self.get_all_data():
            reshaped = np.reshape(sample, (1, 512, 256))
            if new_way:
                spectrogram = VAE_out_put_to_spc(reshaped)
                our_data.append(np.log10(spectrogram + self.epsilon))
            else:
                our_data.append(reshaped)
        return data.DataLoader(our_data, shuffle=shuffle, batch_size=BATCH_SIZE)


def generate_synth_dataset(n_synthetic, mute=False):
    """Generate and preprocess a batch of synthetic spectrogram samples.

    Parameters
    ----------
    n_synthetic : int
        Number of synthetic samples to generate.
    mute : bool
        If True, use the muted variant of the generator.

    Returns
    -------
    np.ndarray
        Samples scaled to VAE input range, with a trailing channel axis.
    """
    generator = (generate_synth_dataset_log_muted_512 if mute
                 else generate_synth_dataset_log_512)
    scaled = spc_to_VAE_input(generator(n_synthetic))
    # Append a singleton channel dimension: (N, H, W) -> (N, H, W, 1).
    return scaled.reshape(scaled.shape[0], scaled.shape[1], scaled.shape[2], 1)


def read_data(data_path):
    """Load an external dataset from disk and preprocess it for the VAE.

    Parameters
    ----------
    data_path : str
        Path to a joblib-serialized spectrogram collection.

    Returns
    -------
    np.ndarray
        Scaled samples with a trailing channel axis.
    """
    loaded = np.array(joblib.load(data_path))
    scaled = spc_to_VAE_input(loaded)
    # Append a singleton channel dimension: (N, H, W) -> (N, H, W, 1).
    return scaled.reshape(scaled.shape[0], scaled.shape[1], scaled.shape[2], 1)


def load_data(n_synthetic):
    """Generate the hybrid dataset: synthetic samples plus external sources.

    Parameters
    ----------
    n_synthetic : int
        Number of synthetic samples to generate.

    Returns
    -------
    Data_cache
        Cache wrapping the synthetic and external data.
    """
    Input_synthetic = generate_synth_dataset(n_synthetic)

    Input_AU = read_data("./data/external_data/ARTURIA_data")
    print("ARTURIA dataset loaded.")

    Input_NSynth = read_data("./data/external_data/NSynth_data")
    print("NSynth dataset loaded.")

    # SoundFonts spectrograms are narrower than the common (512, 256)
    # frame size; zero-pad them on the right. Derive the sample count and
    # width from the loaded data instead of hard-coding 337 and 251.
    Input_SF = read_data("./data/external_data/soundfonts_data")
    Input_SF_256 = np.zeros((Input_SF.shape[0], 512, 256, 1), dtype=Input_SF.dtype)
    Input_SF_256[:, :, :Input_SF.shape[2], :] = Input_SF
    Input_SF = Input_SF_256
    print("SoundFonts dataset loaded.")

    Input_google = read_data("./data/external_data/WaveNet_samples")

    Input_external = np.vstack([Input_AU, Input_NSynth, Input_SF, Input_google])
    data_cache = Data_cache(Input_synthetic, Input_external)
    print(f"Data loaded, data shape: {np.shape(data_cache.get_all_data())}")
    return data_cache
    

def show_data(dataset_name, n_sample=3, index=-1, new_way=False):
    """Show and return a certain dataset.

    NOTE(review): this function is shadowed by a later ``show_data``
    definition in this module, so it is unreachable under this name at
    runtime — consider renaming one of the two.

    Parameters
    ----------
    dataset_name : str
        Name of the dataset to show: one of "ARTURIA", "NSynth",
        "SoundFonts", or "Synthetic".
    n_sample : int
        Number of random samples to show when ``index`` is negative.
    index : int
        Setting ``index`` >= 0 shows the ``index``-th sample in the
        desired dataset instead of random ones.
    new_way : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    np.ndarray or None
        The shown dataset, or None for an unknown ``dataset_name``.
    """

    if dataset_name == "ARTURIA":
        data = read_data("./data/external_data/ARTURIA_data")
    elif dataset_name == "NSynth":
        data = read_data("./data/external_data/NSynth_data")
    elif dataset_name == "SoundFonts":
        data = read_data("./data/external_data/soundfonts_data")
    elif dataset_name == "Synthetic":
        data = generate_synth_dataset(int(n_sample * 3))
    else:
        print("Example command: \"!python thesis_main.py show_data -s [ARTURIA, NSynth, SoundFonts, Synthetic] -n 5\"")
        return

    if index >= 0:
        show_spc(VAE_out_put_to_spc(data[index]))
    else:
        # Draw n_sample random indices; print each so the sample can be
        # located again later.
        for _ in range(n_sample):
            index = np.random.randint(0, len(data))
            print(index)
            show_spc(VAE_out_put_to_spc(data[index]))
    return data
    

def show_data(tensor_batch, index=-1, new_way=False):
    """Display one sample from a batch of tensors as a spectrogram.

    NOTE(review): this definition shadows the earlier dataset-name-based
    ``show_data`` in this module; only this version is callable at
    runtime. Consider renaming one of the two.

    Parameters
    ----------
    tensor_batch : torch.Tensor
        Batch of samples; the first dimension indexes samples.
    index : int
        Sample to show; if negative, a random index is drawn.
    new_way : bool
        If True, treat the sample as a log10 spectrogram and invert the
        log directly; otherwise convert via ``VAE_out_put_to_spc``.
    """
    if index < 0:
        index = np.random.randint(0, tensor_batch.shape[0])

    # Hoist the shared tensor -> numpy conversion out of both branches.
    sample = tensor_batch[index].detach().numpy()
    if new_way:
        spectrogram = 10.0 ** sample
        print(f"The {index}-th sample:")
        show_spc(spectrogram)
    else:
        show_spc(VAE_out_put_to_spc(sample))