import numpy as np
from scipy.fftpack import fft, dct, ifft
from scipy.io import wavfile
import os
import re
import hashlib
import json

MAX_NUM_WAVS_PER_CLASS = 2**27 - 1  # ~134M

# Partitioning helper from the Google speech-commands corpus tooling: splits
# the full sample set into validation / testing / training subsets by hashing
# file names, so the split is stable as new files are added over time.
def which_set(filename, validation_percentage, testing_percentage):
    """Determine which data partition a sample file belongs to.

    The assignment must stay stable even as the corpus grows, so it is
    derived purely from a hash of the file name and the requested set
    proportions — never from the rest of the file list.  Related recordings
    (e.g. the same speaker) share everything before a '_nohash_' marker in
    the name; that suffix is stripped before hashing so 'bobby_nohash_0.wav'
    and 'bobby_nohash_1.wav' always land in the same partition.

    Args:
        filename: File path of the data sample.
        validation_percentage: Share of the data set to use for validation.
        testing_percentage: Share of the data set to use for testing.

    Returns:
        One of the strings 'validation', 'testing', or 'training'.
    """
    base_name = os.path.basename(filename)
    # Variants of the same utterance differ only after '_nohash_'; drop that
    # part so they hash identically and stay in the same partition.
    stable_name = re.sub(r'_nohash_.*$', '', base_name)
    # Map the name to a pseudo-random but deterministic percentage in
    # [0, 100]: hash it, fold into [0, MAX_NUM_WAVS_PER_CLASS], then scale.
    digest = hashlib.sha1(stable_name.encode('utf-8')).hexdigest()
    bucket = int(digest, 16) % (MAX_NUM_WAVS_PER_CLASS + 1)
    percentage_hash = bucket * (100.0 / MAX_NUM_WAVS_PER_CLASS)
    if percentage_hash < validation_percentage:
        return 'validation'
    if percentage_hash < validation_percentage + testing_percentage:
        return 'testing'
    return 'training'


VALIDATION_PERCENTAGE = 10 # share (%) of samples held out for validation in this run
TESTING_PERCENTAGE    = 0  # share (%) of samples held out for testing in this run

# De-noise audio in the frequency domain so the start and end of an utterance
# are easier to find.  The signal is split into 256-point frames with 50%
# overlap, Hann-windowed and FFT'd; bins outside the human-voice band are
# zeroed, and the frames are overlap-added back into a time-domain signal.
# At a 16 kHz sample rate a 256-point FFT has 16000/256 = 62.5 Hz bin
# spacing, so the voice band (~100 Hz - 3 kHz) maps to bins [3, 48); bins
# 0-2 and everything from bin 48 up (plus their conjugate mirrors) are cut.
def cleanup_audio(audio_data):
    """Band-pass filter a 1-D audio signal (assumed 16 kHz) to the voice band.

    Args:
        audio_data: 1-D numpy array of samples.

    Returns:
        Real-valued 1-D numpy array of length (audio_data.size // 256) * 256.
    """
    # Drop the tail so the signal splits into whole 256-point frames.
    n_windows = audio_data.size // 256
    if n_windows == 0:
        # Too short for even one frame: nothing to reconstruct.
        return np.zeros(0)
    audio_data = audio_data[:n_windows * 256]

    # Build frames that overlap their neighbours by 128 samples: frame i
    # covers samples [i*128, i*128 + 256), for i = 0 .. 2*n_windows - 2.
    lead_halves = np.reshape(audio_data[:-128], [-1, 128])
    lag_halves = np.reshape(audio_data[128:], [-1, 128])
    frames = np.reshape(np.hstack([lead_halves, lag_halves]), [-1, 256])

    # Window, transform, and zero every bin outside the voice band.  The
    # input is real, so the spectrum is conjugate-symmetric: each rejected
    # bin k must be zeroed together with its mirror at 256 - k.
    window = np.hanning(256)
    spectrum = fft(frames * window)
    spectrum[:, 0:3] = 0        # below ~125 Hz (bins 0-2)
    spectrum[:, 254:256] = 0    # mirrors of bins 1-2
    spectrum[:, 48:128] = 0     # above ~3 kHz (bins 48-127)
    spectrum[:, 129:209] = 0    # mirrors of bins 48-127

    # Back to the time domain.  The filtered spectrum is (numerically)
    # Hermitian, so the imaginary part of the ifft is round-off noise;
    # keep only the real part.  (The original returned the raw complex
    # ifft output, which broke downstream '>' comparisons.)
    filtered = ifft(spectrum).real

    # Overlap-add reconstruction: output segment j is frame j's first half
    # plus frame j-1's second half, zero-padded at both ends.  This was the
    # evident intent of the original append/insert-zeros sequence, but each
    # of its steps re-reshaped from the untouched ifft output, discarding
    # the previous result, so it ended up summing the two halves of the
    # SAME frame instead of adjacent frames.
    first_halves = np.vstack([filtered[:, :128], np.zeros((1, 128))])
    second_halves = np.vstack([np.zeros((1, 128)), filtered[:, 128:]])
    return (first_halves + second_halves).flatten()

# Load the run configuration (sample directory, keyword list, output CSV
# paths) from conf.json.  Historical hard-coded defaults, for reference:
#   path_sample_dir = './data'
#   list_keywords = ['zero','one','two','three','four','five','six','seven','eight','nine']
with open("conf.json", "r") as conf_file:
    conf = json.load(conf_file)

path_sample_dir = conf["path_samples_dir"]
list_keywords = conf["list_keywords"]

# One CSV per partition; both start with the same header row.
fd_train = open(conf["file_dataset_train"], 'w')
fd_eval = open(conf["file_dataset_eval"], 'w')

CSV_HEADER = "FileName,kFirst,kLast,Digit,Class"
print(CSV_HEADER, file=fd_train)
print(CSV_HEADER, file=fd_eval)

# Amplitude threshold, applied after normalisation to +/-32768, below which
# a sample is treated as silence.
AUDIO_SILENCE_THRESHOLD = 3000

# Walk every keyword folder:
# - assign each sample to the training or evaluation partition via which_set()
# - band-pass filter each sample and locate its active (non-silent) span
# - append one "FileName,kFirst,kLast,Digit,Class" row to the matching CSV
#   for use by the later training stage
for index_keyword, keyword in enumerate(list_keywords):

    path_keyword = os.path.join(path_sample_dir, keyword).replace('\\', '/')
    print(path_keyword)  # show the folder currently being scanned

    for entry in os.scandir(path_keyword):
        if not entry.is_file():
            continue

        f_name = os.path.join(path_sample_dir, keyword, entry.name)

        # Stable hash-based split; anything that is not 'training'
        # (i.e. 'validation' or 'testing') goes to the evaluation CSV.
        set_name = which_set(f_name, VALIDATION_PERCENTAGE, TESTING_PERCENTAGE)
        fd = fd_train if set_name == "training" else fd_eval

        # Band-pass filter the raw samples to suppress background noise.
        fs, data = wavfile.read(f_name)
        data = cleanup_audio(data)

        # Normalise so the loudest sample reaches the full 16-bit range
        # (-32768, +32768).  An all-zero (or empty) file is left untouched
        # to avoid the division by zero the original code had here.
        max_value = np.max(np.abs(data)) if data.size else 0
        if max_value > 0:
            data = data * (32768 / max_value)

        # Active span: indices of the first and last samples above the
        # silence threshold.  The original per-sample Python loops never
        # reset `start`/`end` between files, so a file that stayed below
        # the threshold reused the PREVIOUS file's indices (or raised
        # NameError on the first file); fall back to the full span instead.
        # NOTE(review): only positive excursions are tested, matching the
        # original detector — consider np.abs(data) if negative-peaked
        # files turn up.
        above = np.flatnonzero(data > AUDIO_SILENCE_THRESHOLD)
        if above.size:
            start, end = int(above[0]), int(above[-1])
        else:
            start, end = 0, max(data.size - 1, 0)

        # Emit one CSV row per sample (Digit and Class are both the
        # keyword index, as in the original output format).
        path_keyword_entry = os.path.join(path_sample_dir, keyword, entry.name).replace('\\', '/')
        print(path_keyword_entry + ','
              + str(start) + ','
              + str(end) + ','
              + str(index_keyword) + ','
              + str(index_keyword),
              file=fd)

fd_train.close()
fd_eval.close()

print('DONE.')
