import librosa
import os
import json


# Root folder of the Speech Commands v0.01 dataset (Windows layout).
DATASET_PATH = r"D:\Audio\datasets\speech_commands_v0.01"
# Output file that will hold the extracted MFCCs, labels and file paths.
JSON_PATH = os.path.join(DATASET_PATH, "data.json")
# One second of audio at librosa's default sampling rate (22050 Hz).
SAMPLES_TO_CONSIDER = 22050


def preprocess_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048, num_samples=None):
    """Extract MFCCs from every audio clip in the dataset and save them to JSON.

    Walks ``dataset_path``, treating each first-level sub-directory as one
    class label. Every clip at least ``num_samples`` samples long is
    truncated to exactly that length (so all feature matrices share the
    same shape), converted to MFCCs, and appended to the output dict, which
    is finally dumped to ``json_path``.

    :param dataset_path: root directory; each sub-directory is one keyword/class
    :param json_path: path of the JSON file to write
    :param n_mfcc: number of MFCC coefficients to extract per frame
    :param hop_length: STFT hop length in samples
    :param n_fft: FFT window size in samples
    :param num_samples: fixed clip length in samples; defaults to
        ``SAMPLES_TO_CONSIDER`` (1 second at 22050 Hz)
    """
    if num_samples is None:
        num_samples = SAMPLES_TO_CONSIDER

    # data dictionary serialized to JSON at the end
    data = {
        "mapping": [],   # label index -> label name
        "labels": [],    # per-clip label index
        "MFCCs": [],     # per-clip MFCC matrix (frames x n_mfcc)
        "files": []      # per-clip source file path
    }

    # loop through all the sub-dirs
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):

        # Skip the root itself; only sub-directories carry labels.
        # Use equality, not `is not`: identity comparison on strings only
        # works by accident of CPython string interning.
        if dirpath != dataset_path:

            # basename is portable, unlike splitting on a hard-coded "\\"
            label = os.path.basename(dirpath)
            data["mapping"].append(label)
            print("\nProcessing: {}".format(label))

            for f in filenames:
                file_path = os.path.join(dirpath, f)
                signal, sample_rate = librosa.load(file_path)

                # keep only clips of at least the target length (1s default)
                if len(signal) >= num_samples:

                    # ensure consistency of the length of the signal
                    signal = signal[:num_samples]

                    # librosa >= 0.10 requires keyword arguments here;
                    # positional y/sr raises TypeError.
                    MFCCs = librosa.feature.mfcc(y=signal, sr=sample_rate,
                                                 n_mfcc=n_mfcc,
                                                 hop_length=hop_length,
                                                 n_fft=n_fft)

                    # i is the os.walk index; the root is index 0, so the
                    # first label directory maps to label 0 via i - 1
                    # (assumes a flat, one-level directory structure).
                    data["MFCCs"].append(MFCCs.T.tolist())
                    data["labels"].append(i - 1)
                    data["files"].append(file_path)
                    print("{}: {}".format(file_path, i - 1))

    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)


if __name__ == "__main__":
    # Script entry point: build the MFCC/label JSON for the whole dataset.
    preprocess_dataset(dataset_path=DATASET_PATH, json_path=JSON_PATH)