from python_speech_features import mfcc
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import random
import math
from skimage.transform import resize

def load_noise(path='dat/_background_noise_/'):
    """Load every 16 kHz background-noise WAV file found in *path*.

    :param path: directory containing the noise .wav files
    :return: list of raw sample arrays, one per accepted file
    :raises FileNotFoundError: if *path* does not exist
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f"Noise directory {path} does not exist")
    clips = []
    for name in os.listdir(path):
        # Only WAV files are noise clips; ignore README etc.
        if not name.endswith('.wav'):
            continue
        rate, samples = wav.read(os.path.join(path, name))
        # The whole pipeline assumes a fixed 16 kHz sample rate.
        if rate != 16000:
            print(f"Warning: {name} has sample rate {rate}, expected 16000. Skipping.")
            continue
        clips.append(samples)
    return clips

def generate_mfcc(sig, rate, sig_len, noise=None, noise_weight=0.1, winlen=0.032, winstep=0.032/2, numcep=13, nfilt=26, nfft=512, lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):
    """Compute MFCC features for a fixed-length audio signal, optionally
    mixing in a random background-noise window.

    The signal is zero-padded or truncated to exactly ``sig_len`` samples,
    scaled from int16 range into [-1, 1), optionally mixed with noise, and
    fed to ``python_speech_features.mfcc``.

    :param sig: raw audio samples (int16 range assumed — TODO confirm at callers)
    :param rate: sample rate in Hz
    :param sig_len: target length in samples
    :param noise: list of raw noise sample arrays, or None to skip mixing
    :param noise_weight: fraction of noise in the mixed signal
    :return: float32 MFCC feature matrix, shape (frames, numcep)
    """
    # Force the signal to exactly sig_len samples.
    if len(sig) < sig_len:
        sig = np.pad(sig, (0, sig_len - len(sig)), 'constant')
    elif len(sig) > sig_len:
        sig = sig[:sig_len]
    # Scale int16 samples into [-1, 1) ("tensorflow"-style normalization).
    sig = sig.astype('float32') / 32768.0
    # Guard against an empty noise list as well as None: the original code
    # called random.randint(0, -1) and crashed when no noise clip loaded.
    if noise is not None and len(noise) > 0:
        noise_sig = noise[random.randint(0, len(noise) - 1)]
        # Loop-pad ('wrap') clips shorter than the target length.
        if len(noise_sig) < sig_len:
            noise_sig = np.pad(noise_sig, (0, sig_len - len(noise_sig)), 'wrap')
        # Pick a random sig_len window inside the clip.
        start = random.randint(0, max(0, len(noise_sig) - sig_len))
        noise_win = noise_sig[start:start + sig_len].astype(np.float32) / 32768.0
        sig = sig * (1 - noise_weight) + noise_win * noise_weight
        #wav.write('noise_test.wav', rate, sig)
    mfcc_feat = mfcc(sig, rate, winlen=winlen, winstep=winstep, numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,
                     highfreq=highfreq, winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)
    return mfcc_feat.astype('float32')
    
'''
def resize_array(array, new_shape):
    """
    使用平均值池化将数组缩小到新的尺寸。
    
    :param array: 输入的二维数组
    :param new_shape: 缩小后的新尺寸 (rows, cols)
    :return: 缩小后的二维数组
    """
    orig_shape = array.shape
    scale_rows = orig_shape[0] / new_shape[0]
    scale_cols = orig_shape[1] / new_shape[1]
    
    # 创建缩小后的数组
    resized_array = np.zeros(new_shape, dtype=array.dtype)
    
    # 遍历缩小后的数组
    for i in range(new_shape[0]):
        for j in range(new_shape[1]):
            # 计算原始数组中的对应区域
            start_row = int(i * scale_rows)
            end_row = int((i + 1) * scale_rows)
            start_col = int(j * scale_cols)
            end_col = int((j + 1) * scale_cols)
            
            # 处理边界情况，确保索引不会越界
            start_row = max(start_row, 0)
            end_row = min(end_row, orig_shape[0])
            start_col = max(start_col, 0)
            end_col = min(end_col, orig_shape[1])
            
            # 计算该区域的平均值
            resized_array[i, j] = np.mean(array[start_row:end_row, start_col:end_col])
    
    return resized_array
'''

def CLAMP(value, min_value, max_value):
    """Limit *value* to the range [min_value, max_value]."""
    upper_bounded = min(value, max_value)
    return max(upper_bounded, min_value)

def bilinear_interpolate(src, dst_width, dst_height):
    """Resize a 2-D array to (dst_height, dst_width) with bilinear interpolation.

    :param src: input 2-D array (height, width)
    :param dst_width: target width
    :param dst_height: target height
    :return: resized 2-D array of dtype np.float32
    """
    src_height, src_width = src.shape
    dst = np.zeros((dst_height, dst_width), dtype=np.float32)

    # Degenerate sizes: nothing to sample from / nothing to fill.
    if 0 in (src_width, src_height, dst_width, dst_height):
        return dst

    # Scale factors mapping destination pixels onto source coordinates;
    # max(..., 1) guards against division by zero for 1-pixel outputs.
    x_scale = (src_width - 1) / max(dst_width - 1, 1)
    y_scale = (src_height - 1) / max(dst_height - 1, 1)

    last_col = src_width - 1
    last_row = src_height - 1

    for row in range(dst_height):
        src_y = row * y_scale
        # Two neighbouring source rows, clamped into bounds.
        y0 = max(min(math.floor(src_y), last_row), 0)
        y1 = max(min(y0 + 1, last_row), 0)
        wy = src_y - y0
        wy_inv = 1.0 - wy
        for col in range(dst_width):
            src_x = col * x_scale
            # Two neighbouring source columns, clamped into bounds.
            x0 = max(min(math.floor(src_x), last_col), 0)
            x1 = max(min(x0 + 1, last_col), 0)
            wx = src_x - x0
            wx_inv = 1.0 - wx
            # Blend the four neighbouring samples (same term order as the
            # classic Q11/Q21/Q12/Q22 formulation).
            dst[row, col] = (src[y0, x0] * wx_inv * wy_inv +
                             src[y0, x1] * wx * wy_inv +
                             src[y1, x0] * wx_inv * wy +
                             src[y1, x1] * wx * wy)
    return dst
    
def merge_mfcc_file(input_path='dat/', mix_noise=True, sig_len=16000, winlen=0.032, winstep=0.032/2, numcep=13, nfilt=26, nfft=512,
                    lowfreq=20, highfreq=4000, winfunc=np.hanning, ceplifter=0, preemph=0.97):
    """Build MFCC feature arrays for the train/test/validation splits of a
    speech-commands-style dataset rooted at *input_path*.

    Each class lives in its own sub-directory; ``testing_list.txt`` and
    ``validation_list.txt`` list held-out files as ``label/filename.wav``
    relative paths. Background-noise folders (name contains 'noise') are
    chopped into ``sig_len`` chunks and added to training as label 'noise'.
    Every feature matrix is resized to 54x13 via bilinear interpolation.

    :param input_path: dataset root directory
    :param mix_noise: mix random background noise into training samples
    :param sig_len: fixed sample count per utterance
    :return: ((train_data, train_label), (test_data, test_label),
              (validate_data, validate_label)) — data as np.float32 arrays,
              labels as lists of strings
    """
    train_data, train_label = [], []
    test_data, test_label = [], []
    validate_data, validate_label = [], []

    noise = load_noise() if mix_noise else None

    # The split lists contain relative paths like "bed/0a7c2a8d_nohash_0.wav".
    # Build sets of exact paths: the original substring check
    # (`filename in test_list`) cross-matched files because the same
    # utterance filename appears under several labels.
    with open(os.path.join(input_path, 'testing_list.txt'), 'r') as f:
        test_list = {line.strip() for line in f if line.strip()}
    with open(os.path.join(input_path, 'validation_list.txt'), 'r') as f:
        validate_list = {line.strip() for line in f if line.strip()}

    for entry in os.listdir(input_path):
        folder = os.path.join(input_path, entry)
        # Each class is a folder; skip loose files such as the list txts.
        if not os.path.isdir(folder):
            continue
        label = entry  # directory name is the class label (separator-safe)
        print(label)

        if 'noise' in label:
            # Background noise: split long recordings into sig_len chunks,
            # all assigned to training with the synthetic label 'noise'.
            for filename in os.listdir(folder):
                if not filename.endswith('.wav'):
                    continue
                rate, sig = wav.read(os.path.join(folder, filename))
                for i in range(0, len(sig), sig_len):
                    data = generate_mfcc(sig[i:i + sig_len], rate, sig_len, winlen=winlen, winstep=winstep,
                                         numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq, highfreq=highfreq,
                                         winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)
                    train_data.append(bilinear_interpolate(np.array(data), 13, 54))
                    train_label.append('noise')
            continue

        # Command folders: route each utterance to its split.
        for filename in os.listdir(folder):
            # Skip non-audio files instead of crashing wav.read on them.
            if not filename.endswith('.wav'):
                continue
            rate, sig = wav.read(os.path.join(folder, filename))
            rel_path = f"{label}/{filename}"  # lists always use '/' separators
            if rel_path in test_list:
                sample_noise = None  # held-out data stays clean
                target_data, target_label = test_data, test_label
            elif rel_path in validate_list:
                sample_noise = None
                target_data, target_label = validate_data, validate_label
            else:
                sample_noise = noise  # only training data gets noise mixed in
                target_data, target_label = train_data, train_label
            data = generate_mfcc(sig, rate, sig_len, noise=sample_noise, winlen=winlen, winstep=winstep,
                                 numcep=numcep, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq, highfreq=highfreq,
                                 winfunc=winfunc, ceplifter=ceplifter, preemph=preemph)
            target_data.append(bilinear_interpolate(np.array(data), 13, 54))
            target_label.append(label)

    # finalize
    return ((np.array(train_data), train_label),
            (np.array(test_data), test_label),
            (np.array(validate_data), validate_label))

if __name__ == "__main__":

    # Build the dataset splits and persist each one as a .npy file.
    (x_train, y_train), (x_test, y_test), (x_val, y_val) = merge_mfcc_file()

    for out_name, arr in (('train_data.npy', x_train), ('train_label.npy', y_train),
                          ('test_data.npy', x_test), ('test_label.npy', y_test),
                          ('val_data.npy', x_val), ('val_label.npy', y_val)):
        np.save(out_name, arr)

    print('x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())

    # Quick visual sanity check on one training example.
    mfcc_feat = np.swapaxes(x_train[10], 0, 1)
    fig, ax = plt.subplots()
    cax = ax.imshow(mfcc_feat, interpolation='nearest', origin='lower', aspect='auto')
    ax.set_title('MFCC')
    plt.show()