# -*- coding:utf-8 -*-#
# @Time:2023/5/31 14:48
# @Author:Adong
# @Software:PyCharm

"""

"""

from os.path import exists, join
from os import mkdir, listdir
import random
from heapq import nlargest
import numpy as np
import librosa
from sklearn.metrics.pairwise import cosine_similarity
from librosa.feature import melspectrogram
from librosa import stft, istft, load as _load, get_duration
import matplotlib.pyplot as plt
from numpy import max as _max,abs as _abs,log10,real,transpose,median,linspace,float32,var,arange,mean as _mean,zeros,append as _append,min as _min,array,pad
import scipy.io.wavfile as _wavefile
from PIL import Image
from scipy.fftpack import fft, ifft
from scipy.signal import savgol_filter
from sklearn.preprocessing import MinMaxScaler


class Universal_tool:
    def case_create(self, target_folder, fault_type):
        """
        在目标文件夹target_folder创建各类故障fault_type的文件夹。
        :param fault_type: 期望创建文件夹的故障种类
        :param target_folder:例如[wav_to_gray,wav_to_MEL,wav_to_STFT]
        :return:
        """
        if not exists(join('./data/', target_folder)):  # 判断所在目录下是否有该文件名的文件夹
            mkdir(join('./data/', target_folder))       # 创建多级目录用mkdirs，单击目录mkdir
        set_type = ['train', 'test', 'veri']            # 设置三种类型的数据集
        for st in set_type:                             # 遍历set_type
            if not exists(join('./data/', target_folder, st)):  # 判断所在目录下是否有该文件名的文件夹
                mkdir(join('./data/', target_folder, st))       # 创建多级目录用mkdirs，单击目录mkdir
            for ft in fault_type:                                # 遍历fault_type
                if not exists(join('./data/', target_folder, st, ft)):  # 判断所在目录下是否有该文件名的文件夹
                    mkdir(join('./data/', target_folder, st, ft))       # 创建多级目录用mkdirs，单击目录mkdir

    def FFT_denoisying(self, seq, rate):
        """
        FFT可以用来过滤低幅频率
        :param seq:原时域信号
        :param rate: 原信号采样频率
        :return: FFT降噪后的时域信号
        """
        '''原始频域'''
        _seq = fft(seq)                 # 对y做快速傅里叶变换
        _seq = _seq * (2 / len(seq))    # 幅值变换
        _seq[0] = _seq[0] / 2           # 最小频率直流信号的处理
        _seq[len(_seq) - 1] = _seq[len(_seq) - 1] / 2  # 最大频率直流信号的处理
        '''降噪频域'''
        _seq_real = _abs(_seq.copy())                   # 读取y_fft的实部
        _seq_real_log = log10(_seq_real.copy())         # 取对数
        slide = min(_seq_real_log)                      # 计算平移长度
        _seq_real_log = _seq_real_log - slide           # 向y轴正方向平移，直到所有的值都非负
        indices = _seq_real_log < _max(_seq_real_log.copy()) * 0.6          # 筛选对数实部值位于下60%的频率
        _seq_denoise = indices * _seq.copy() * 0 + ~indices * _seq.copy()   # 下60%幅值频率部分降低到0%，上40%幅值频率不变
        '''降噪时域'''
        seq_ = _seq_denoise.copy()                      # 把降噪频域信号赋值给y_ifft
        seq_[0] = seq_[0] * 2                           # 最小频率直流信号逆处理
        seq_[len(seq_) - 1] = seq_[len(seq_) - 1] * 2   # 最大频率直流信号逆处理
        seq_ = seq_ * (len(seq) / 2)                    # 幅值逆变换
        seq_ = ifft(seq_)                               # 逆傅里叶变换

        return real(seq_)

    def REPET(self, seq, rate, n_fft=25, hop_length=10):
        """
        基于重复模式提取的盲源分离算法
        :param hop_length: 业界常用窗移10ms
        :param n_fft: 业界常用窗长25ms
        :param seq:
        :param rate: 
        :return: 
        """
        n_fft = int(n_fft / 1000 * rate)
        hop_length = int(hop_length / 1000 * rate)
        noise_stft = stft(seq, n_fft=n_fft, hop_length=hop_length, window="hamm")   # stft得到时频谱

        noise_stft_trans = transpose(noise_stft.copy()).real                        # 转置并取实数值
        similar_matrix = cosine_similarity(noise_stft_trans)                        # 计算相似矩阵
        num_of_maxs = int(0.8 * len(similar_matrix))                                # 设置相似帧的数量

        denoise_stft = []                                       # 存储更新后的时频图
        for _idx, similar in enumerate(similar_matrix):
            similar = similar.tolist()                          # array转list
            idx = list(map(similar.index, nlargest(num_of_maxs, similar)))          # 求最大的num_of_maxs个索引
            similar_frame = noise_stft[:, idx]                  # 获得与取出帧相似度最大的若干帧
            V_sim = median(similar_frame, axis=1)               # 取每行的中位数。axis=1对应行，axis=0对应列
            temp = []                                           # 用于存储一帧的频率幅值
            for __idx, V in enumerate(V_sim):                    # 遍历每一个频率的中值
                if abs(noise_stft[__idx, _idx].real) <= abs(V.real):  # 小于等于中值，则保留原值
                    temp.append(noise_stft[__idx, _idx])
                else:
                    temp.append(V)
            denoise_stft.append(temp)
        denoise_stft = transpose(denoise_stft)
        '''计算维纳滤波系数部分'''
        # H_sim = denoise_stft / noise_stft         # 维纳滤波系数,H_sim是复数矩阵
        '''哈曼达积时频掩蔽'''
        # back_spec = noise_stft * H_sim            # 背景时频谱（振动信号），复数矩阵
        back_spec = denoise_stft
        noise_spec = noise_stft - back_spec         # 噪声信号，复数矩阵
        '''ISTFT部分'''
        istft_y = istft(noise_stft, n_fft=n_fft, hop_length=hop_length, window="hamm")
        istft_back_spec = istft(back_spec, n_fft=n_fft, hop_length=hop_length, window="hamm")
        istft_noise_spec = istft(noise_spec, n_fft=n_fft, hop_length=hop_length, window="hamm")
        # 含噪时频、背景时频、噪声时频、去噪时域、背景时域、噪声时域
        return noise_stft, back_spec, noise_spec, istft_y, istft_back_spec, istft_noise_spec

    def savgol(self, seq, win_length=51,stride=2):
        """
        曲线平滑
        :param stride:
        :param win_length:
        :param seq:
        :return:
        """
        """savgol平滑部分"""
        y1 = savgol_filter(seq, window_length=win_length, polyorder=stride)
        return y1

    @classmethod
    def array2wav(cls, nparray, rate, file_name):
        """
        将numpy数组转为单通道wav文件
        :param nparray: 输入的numpy向量
        :param file_name: wav文件名
        :param rate: 采样率(默认=sr)
        :return:
        """
        _wavefile.write(file_name, rate, nparray.astype(float32))

    def AVF(self, seq, rate,n_fft=20,hop_length=2):
        """
        幅值方差滤波
        :param hop_length:
        :param n_fft:
        :param seq:
        :param rate:
        :return:
        """
        n_fft = int(n_fft/1000 * rate)
        hop_length = int(hop_length/1000 * rate)
        noise_stft = stft(seq, n_fft=n_fft, hop_length=hop_length, window="hamm")

        amp = noise_stft.real
        amp_var = []  # 所有频率幅值的方差
        for hz in amp:
            amp_var.append(var(hz))

        '''计算50hz倍频点附近频点的方差值平均'''
        hz_50 = arange(50, len(noise_stft), 50)  # 找出50hz倍频
        W_win = 10
        alphaX = []         # 保存50hz倍频附近W_win个频率点方差的平均值
        Ptrans = []         # 变压器频率
        Pfan = []           # 风扇频率
        for idx, x in enumerate(hz_50):
            piece = amp_var[x - W_win:x + W_win]  # 找出50hz倍频附近W_win个频率点的方差
            mean = _mean(piece) / 2  # 计算平均值
            alphaX.append(mean)
            if mean > amp_var[x]:  # 如果平均值大于当前50倍频，则认为是本体
                Ptrans.append(x)
            else:
                Pfan.append(x)
        y_fft = fft(seq)  # 对y做傅里叶变换
        y_fft_new = y_fft.copy()
        symmetry = len(y_fft) / 2
        for idx, x in enumerate(y_fft):
            if idx in Pfan:
                y_fft_new[idx] = 0 + 0j
                y_fft_new[-idx] = 0 + 0j
            else:
                y_fft_new[idx] = x

        y_ifft = ifft(y_fft_new)  # 逆傅里叶变换
        Universal_tool.array2wav(y_ifft, rate, 'AVF.wav')
        return y_ifft


class Transform2Gray:
    """
    Convert wav files into single-channel grayscale images.
    """

    def __init__(self, file_root):
        """
        :param file_root: path of the folder that holds the wav files
        """
        self.pic_size = None
        self.file_root = file_root
        self.file_path_list, self.file_name_list = self.get_file_path_list()

    def get_file_path_list(self):
        """
        Collect every file name and full file path inside the wav folder.

        :return: file path list, file name list
        """
        file_name = listdir(self.file_root)  # names of the wav files
        # join the folder path with each file name to build the full path
        file_path_list = [join(self.file_root, file) for file in file_name]
        return file_path_list, file_name

    def create_GRAY_1channel(self, filepath, pic_size, train_percent, test_percent, veri_percent, savepath,
                             if_FFT=False, if_savgol=False, if_SMD=False, if_AVF=False):
        """
        Generate pic_size*pic_size single-channel grayscale images from one wav file.

        :param if_SMD: whether to run SMD (REPET) denoising
        :param filepath: path of the wav file to convert
        :param pic_size: edge length of the generated images
        :param train_percent: fraction of the audio used for the training set
        :param test_percent: fraction of the audio used for the test set
        :param veri_percent: fraction of the audio used for the verification set
        :param savepath: folder where the generated images are saved
        :param if_FFT: whether to run FFT denoising
        :param if_savgol: whether to run savgol smoothing
        :param if_AVF: whether to run amplitude-variance filtering
        :return: None
        """
        '''load the audio'''
        y, sr = _load(filepath)
        print("导入文件：" + filepath)
        sr = 48000  # NOTE(review): sr is overwritten without resampling y — confirm this is intended
        print("重采样完成!现采样率=" + str(sr))
        # BUG FIX: the denoising helpers are instance methods; the previous unbound calls
        # (Universal_tool.REPET(y, sr) etc.) passed y as `self` and raised TypeError.
        tool = Universal_tool()
        if if_SMD is True:
            data_stft, back_spec, noise_spec, istft_y, istft_back_spec, istft_noise_spec = tool.REPET(y, sr)
            y = istft_back_spec
            print("相似矩阵降噪完成!")
        if if_AVF is True:
            y = tool.AVF(y, sr)
            print("模值波动降噪完成!")
        if if_FFT is True:  # optional FFT denoising
            y = tool.FFT_denoisying(y, sr)
            print("FFT降噪完成!")
        if if_savgol is True:
            # BUG FIX: sr used to be passed into savgol's win_length slot; use the default window
            y = tool.savgol(y)
            print("savgol平滑完成!")

        d = get_duration(y=y, sr=sr)  # audio duration in seconds
        test_length, veri_length = test_percent * d, veri_percent * d  # audio lengths for the test and verification sets
        train_length = train_percent * d  # audio length for the training set
        self.pic_size = pic_size  # edge length of the target grayscale image
        # random clip start points for the training set (200 images of this class)
        train_start = random.sample(range(0, round(train_length * sr - pic_size * pic_size)), 200)
        # random clip start points for the test set (25 images)
        test_start = random.sample(
            range(round(train_length * sr), round(train_length * sr + test_length * sr - pic_size * pic_size)), 25)
        # random clip start points for the verification set (25 images)
        veri_start = random.sample(range(round((train_length + test_length) * sr), round(d * sr - pic_size * pic_size)),
                                   25)
        '''build the audio clip datasets'''
        train_dataset = [y[i:i + pic_size * pic_size] for i in train_start]  # training clips
        test_dataset = [y[j:j + pic_size * pic_size] for j in test_start]    # test clips
        veri_dataset = [y[k:k + pic_size * pic_size] for k in veri_start]    # verification clips
        '''iterate over the dataset modes'''
        datasets = {'train': train_dataset, 'test': test_dataset, 'veri': veri_dataset}
        for mode, dataset in datasets.items():
            '''convert to grayscale'''
            # <savepath>/<mode>/<fault type taken from the parent folder of filepath>/
            img_save_path = savepath + mode + '/' + filepath.split('/')[-2] + '/'
            for idx, piece in enumerate(dataset):
                n = len(listdir(img_save_path))  # count existing images so new ones never overwrite them
                pic = zeros(shape=(pic_size, pic_size))  # blank grayscale image
                for x in range(pic_size):  # fill the image row by row
                    rowstrat = pic_size * x  # start index of this row inside the clip
                    rowend = pic_size * (x + 1)  # end index of this row inside the clip
                    row = piece[rowstrat:rowend:1]  # slice this row out of the clip
                    pic[x] = row  # place the row into the image
                normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
                normalized_pic = normalize_tool.fit_transform(pic)  # normalize
                img = Image.fromarray(normalized_pic)  # array -> image
                img = img.convert('L')  # convert to grayscale
                img.save(img_save_path + str(n + 1) + '.png')  # save the image


class Transform2RGB:
    """
    Convert a wav file into RGB images.
    """

    def __init__(self, file_root=None):
        self.filepath = file_root  # audio file path
        self.y, self.sr = _load(file_root)  # signal values and sampling rate
        self.audio_time = get_duration(path=file_root)  # audio duration in seconds
        the_length_of_the_audio_clip = 2.2  # clip length in seconds
        self.samples = self.y[0:int(the_length_of_the_audio_clip / self.audio_time * len(self.y))]

    def time_domain(self):
        """
        Plot the time-domain curve of the clip next to its grayscale rendering.

        :return: the normalized 224*224 grayscale array
        """
        plt.figure()  # new figure
        ax1 = plt.subplot(1, 2, 1)  # panel 1: time-domain curve
        ax2 = plt.subplot(1, 2, 2)  # panel 2: grayscale image built from the curve
        '''time-domain plot'''
        time = linspace(0, self.audio_time, len(self.samples))  # len(self.samples) points over 0~audio_time
        samples = self.samples
        ax1.plot(time, samples)
        ax1.set_title("time-amplitude")
        ax1.set_xlabel("time(s)")
        ax1.set_ylabel("amplitude(db)")
        '''grayscale image'''
        m = min(samples)
        while len(samples) < 224 * 224:  # pad with the minimum value up to 224*224 samples
            samples = _append(samples, m)
        reshape_samples = samples[:224 * 224].reshape((224, 224))
        normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
        normalized_pic = normalize_tool.fit_transform(reshape_samples)  # normalize
        img = Image.fromarray(normalized_pic)  # array -> image
        img = img.convert('L')  # convert to grayscale
        ax2.imshow(img, cmap='gray')  # show the single-channel image
        '''display'''
        plt.show(block=True)
        return normalized_pic

    def frequency_domain(self):
        """
        Plot the frequency spectrum of the clip next to its grayscale rendering.

        :return: the normalized 224*224 grayscale array
        """
        plt.figure()  # new figure
        ax1 = plt.subplot(1, 2, 1)  # panel 1: spectrum
        ax2 = plt.subplot(1, 2, 2)  # panel 2: grayscale image built from the spectrum
        '''frequency-domain plot'''
        y_f = fft(self.samples)  # fast Fourier transform
        y_f = y_f * (2 / len(self.samples))  # amplitude normalization
        y_f[0] = y_f[0] / 2  # halve the DC bin
        y_f[len(y_f) - 1] = y_f[len(y_f) - 1] / 2  # halve the highest-frequency bin
        frequency = linspace(0, self.sr / 2 - 1, int(self.sr / 2))  # Nyquist: the useful range is 0~sr/2
        ax1.plot(frequency, abs(y_f[0:int(self.sr / 2)]))
        ax1.set_title("frequency-amplitude")
        ax1.set_xlabel("frequency(Hz)")
        ax1.set_ylabel("amplitude(db)")
        '''grayscale image'''
        y_f = abs(y_f)
        m = min(y_f)
        while len(y_f) < 224 * 224:  # pad with the minimum value up to 224*224 bins
            y_f = _append(y_f, m)
        reshape_magnitude = y_f[:224 * 224].reshape((224, 224))  # first 224*224 magnitudes as a 224*224 matrix
        normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
        normalized_pic = normalize_tool.fit_transform(reshape_magnitude)  # normalize
        img = Image.fromarray(normalized_pic)  # array -> image
        img = img.convert('L')  # convert to grayscale
        ax2.imshow(img, cmap='gray')  # show the single-channel image
        # img.save('1.png')  # save the grayscale image
        '''display'''
        plt.show(block=True)
        return normalized_pic

    def time_frequency_domain(self):
        """
        Plot the time-frequency spectrogram of the clip next to its grayscale rendering.

        :return: the normalized 224*224 grayscale array
        """
        fig = plt.figure()  # new figure
        ax1 = plt.subplot(1, 2, 1)  # panel 1: spectrogram curves
        ax2 = plt.subplot(1, 2, 2)  # panel 2: grayscale image built from the spectrogram
        '''time-frequency plot'''
        n_fft = int(self.sr)
        _stft = stft(y=self.samples, n_fft=n_fft)
        y_f = _stft * (2 / len(self.samples))  # amplitude normalization
        y_f[:][0] = y_f[:][0] / 2  # halve the DC row
        y_f[:][len(y_f) - 1] = y_f[:][len(y_f) - 1] / 2  # halve the highest-frequency row
        '''FFT filtering (disabled)'''
        # indices = stft > 300  # filter out those value under 300
        # stft = indices * stft  # noise frequency will be set to 0
        # spectrogram = librosa.amplitude_to_db(np.abs(stft))
        # BUG FIX: `abs(stft)` took the absolute value of the imported stft *function*
        # (TypeError); the magnitude of the computed spectrum `_stft` was intended.
        spectrogram = abs(_stft) * (2 / len(self.samples))

        frequency = linspace(0, len(spectrogram) - 1, len(spectrogram))
        ax1.plot(frequency, spectrogram)
        ax1.set_title("frequency-amplitude")
        ax1.set_xlabel("frequency(Hz)")
        ax1.set_ylabel("amplitude(db)")
        '''grayscale image'''
        col = spectrogram.shape[1]
        cut = round(224 * 224 / col)
        spectrogram = spectrogram[:cut]
        m = _min(spectrogram)
        while len(spectrogram) < 224 * 224:  # NOTE: _append flattens the 2-D array, then pads with the minimum
            spectrogram = _append(spectrogram, m)
        reshape_spectrogram = spectrogram[:224 * 224].reshape(
            (224, 224))  # first 224*224 magnitudes as a 224*224 matrix
        normalize_tool = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
        normalized_pic = normalize_tool.fit_transform(reshape_spectrogram)  # normalize
        img = Image.fromarray(normalized_pic)  # array -> image
        img = img.convert('L')  # convert to grayscale
        ax2.imshow(img, cmap='gray')  # show the single-channel image
        '''display'''
        plt.show(block=True)
        return normalized_pic

    def array2RGB(self, red, green, blue):
        """Merge three equal-sized arrays into one RGB image, show it and save it as 1.png."""
        r = Image.fromarray(red).convert('L')
        g = Image.fromarray(green).convert('L')
        b = Image.fromarray(blue).convert('L')
        img = Image.merge('RGB', (r, g, b))
        img.show()
        img.save('1.png')


class Transform2Mel:
    """
    Turn the time-domain audio signal into 1*3*224*224 Mel spectrogram
    images matching the resnet input size.
    """

    def __init__(self, file_root):
        """
        :param file_root: path of the folder that holds the wav files
        """
        self.pic_size = None
        self.file_root = file_root
        self.file_path_list, self.file_name_list = self.get_file_path_list()

    def get_file_path_list(self):
        """
        Collect every file name and full file path inside the wav folder.

        :return: file path list, file name list
        """
        names = listdir(self.file_root)  # names of the wav files
        # folder path + file name -> full path, one entry per file
        paths = [join(self.file_root, name) for name in names]
        return paths, names

    def basicData(self, y, sr):
        """Store the signal and derive the STFT/Mel geometry parameters from the 224 edge."""
        # REPET blind source separation (disabled)
        # noise_stft, back_spec, noise_spec, istft_y, istft_back_spec, istft_noise_spec = Universal_tool.REPET(y, sr)
        self.y = y
        self.sr = sr
        edge = 224  # target image edge length
        self.pic_size = edge
        self.n_fft = 2 * (edge - 1)
        self.hop_length = edge - 1
        self.audio_length = (edge - 1) * self.hop_length

    def cut_pic(self, train_num, test_num, veri_num):
        """
        Randomly slice audio_length-long windows out of the signal and split
        them into training / test / verification groups.

        :param train_num: number of training clips
        :param test_num: number of test clips
        :param veri_num: number of verification clips
        :return: (train_pic, test_pic, veri_pic) lists of clips
        """
        window = self.audio_length
        signal = self.y
        total = train_num + test_num + veri_num
        # one random start per clip, drawn without replacement
        starts = random.sample(range(0, round(len(signal) - window)), total)
        clips = [signal[s:s + window] for s in starts]
        train_pic = clips[:train_num]
        test_pic = clips[train_num:train_num + test_num]
        veri_pic = clips[train_num + test_num:]
        self.train_pic = train_pic
        self.test_pic = test_pic
        self.veri_pic = veri_pic
        return train_pic, test_pic, veri_pic

    def zero_padding(self, mel_spect):
        """
        Zero-pad a 128*224 Mel image vertically so it becomes pic_size*224, centred.

        :param mel_spect: 2-D array with fewer than pic_size rows
        :return: padded 2-D array with exactly pic_size rows
        """
        rows = mel_spect.tolist()
        blank = zeros(len(rows[0])).tolist()  # one all-zero row of matching width
        missing = self.pic_size - len(rows)
        top = int(missing / 2)      # rows added above
        bottom = missing - top      # rows added below (gets the odd remainder)
        padded = [blank] * top + rows + [blank] * bottom
        return array(padded)

    def trans2singleMel(self, y):
        """
        Build one normalized Mel spectrogram and one normalized STFT spectrogram
        for a clip, both zero-padded to pic_size rows.

        :param y: audio clip
        :return: (padded Mel image, padded STFT image)
        """
        mel_spect = melspectrogram(y=y, sr=self.sr, n_fft=self.n_fft,
                                   hop_length=self.hop_length)  # Mel spectrogram
        noise_stft = stft(y, n_fft=self.n_fft, hop_length=self.hop_length, window="hamm").real  # real part of the STFT
        scaler = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
        mel_spect = self.zero_padding(scaler.fit_transform(mel_spect))
        noise_stft = self.zero_padding(scaler.fit_transform(noise_stft))
        return mel_spect, noise_stft

    def merge(self, pics, img_save_path):
        """
        Save every clip as a grayscale time-domain image. (The RGB merge of
        Mel / STFT / time channels is currently disabled.)

        :param pics: list of audio clips, e.g. train_pic / test_pic / veri_pic
        :param img_save_path: folder where the images are written
        :return: None
        """
        for clip in pics:
            existing = len(listdir(img_save_path))  # count existing images so new ones never overwrite them

            '''Mel / STFT channel extraction (disabled)'''
            # mel_spect, noise_stft = self.trans2singleMel(clip)
            '''time-domain channel'''
            # centre the clip inside pic_size*pic_size samples
            missing = self.pic_size * self.pic_size - len(clip)
            lead = int(missing / 2)
            trail = missing - lead
            time_array = pad(clip, (lead, trail), 'constant', constant_values=(0, 0))
            time_array = time_array.reshape(self.pic_size, self.pic_size)
            scaler = MinMaxScaler(feature_range=(0, 255))  # min-max normalization into (0, 255)
            time_array = scaler.fit_transform(time_array)
            '''RGB merge (only the blue/time channel is kept)'''
            # r = Image.fromarray(mel_spect).convert('L')
            # g = Image.fromarray(noise_stft).convert('L')
            b = Image.fromarray(time_array).convert('L')
            b.save(img_save_path + str(existing) + '.png')
            # img = Image.merge('RGB', (r, g, b))
            # img.save(img_save_path + str(existing) + '.png')


if __name__ == '__main__':
    # BUG FIX: librosa.display is a submodule that `import librosa` alone does not
    # guarantee to load; without this import, librosa.display.specshow raises AttributeError.
    import librosa.display

    # the four wav files to compare, one log-Mel spectrogram each on a 2x2 grid
    paths = [
        r'E:\MY_code\LSTM_create_wav\data\重过载_byq_zgz_01.wav',
        r'E:\MY_code\LSTM_create_wav\create_wav\create_zgz_0.wav',
        r'E:\MY_code\LSTM_create_wav\create_wav\create_zgz_4.wav',
        r'E:\MY_code\LSTM_create_wav\create_wav\create_zgz_8.wav',
    ]
    for idx, path in enumerate(paths, start=1):
        y, sr = librosa.load(path)
        # 25 ms window, 20 ms hop — same geometry for every file
        mel_spec = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=int(0.025 * sr), hop_length=int(0.02 * sr))
        plt.subplot(2, 2, idx)
        librosa.display.specshow(np.log(abs(mel_spec)), sr=sr, x_axis='time', y_axis='mel')
    plt.show()
