# -*- coding:utf-8 -*-#
# @Time:2023/7/3 9:22
# @Author:Adong
# @Software:PyCharm
"""
MFCC GFCC PNCC特征融合demo
"""

import librosa
# from spafe.features.gfcc import gfcc
# from spafe.features.pncc import pncc
# from spafe.features.mfcc import mfcc
from sklearn.preprocessing import MinMaxScaler
from PIL import Image
import os
import random



class MGPmerge:
    """Fuse MFCC/GFCC/PNCC features of wav files into RGB images.

    Each of the three feature matrices becomes one color channel of the
    output image (R=MFCC, G=GFCC, B=PNCC in the intended pipeline).

    NOTE(review): cal_MFCC/cal_GFCC/cal_PNCC call ``mfcc``/``gfcc``/``pncc``
    from spafe, whose imports are commented out at the top of the file;
    those methods raise NameError until the imports are restored.
    """

    def __init__(self, file_root):
        """
        :param file_root: directory containing the .wav files
        """
        self.file_root = file_root
        self.file_path_list, self.file_name_list = self.get_file_path_list()
        # Destination directory for the merged RGB images.
        self.img_save_path = r'./data/noise_img/'

    def get_file_path_list(self):
        """Return (paths, names) of all entries directly under file_root.

        :return: tuple of (list of full paths, list of bare file names)
        """
        file_names = os.listdir(self.file_root)
        file_paths = [os.path.join(self.file_root, name) for name in file_names]
        return file_paths, file_names

    def cal_MFCC(self, y, sr):
        """MFCC features of signal ``y`` (48 ceps / 48 filters).

        NOTE(review): requires ``from spafe.features.mfcc import mfcc``.
        """
        MFCC = mfcc(y, sr, num_ceps=48, nfilts=48)
        return MFCC

    def cal_GFCC(self, y, sr):
        """GFCC features of signal ``y`` (48 ceps / 48 filters).

        NOTE(review): requires ``from spafe.features.gfcc import gfcc``.
        """
        GFCC = gfcc(y, sr, num_ceps=48, nfilts=48)
        return GFCC

    def cal_PNCC(self, y, sr):
        """PNCC features of signal ``y`` (48 ceps / 48 filters).

        NOTE(review): requires ``from spafe.features.pncc import pncc``.
        """
        PNCC = pncc(y, sr, num_ceps=48, nfilts=48)
        return PNCC

    def cut_pic(self, num, audio_length, y, mode):
        """Split signal ``y`` into fixed-length pieces.

        :param num: number of pieces to draw (used only when mode == 1)
        :param audio_length: samples per piece
        :param y: 1-D audio signal (anything sliceable)
        :param mode: 1 = ``num`` randomly positioned windows (may overlap);
                     2 = consecutive non-overlapping windows
        :return: list of slices of ``y``, each of length ``audio_length``
        :raises ValueError: if ``mode`` is neither 1 nor 2
        """
        if mode == 1:
            # Distinct random start offsets; windows themselves may overlap.
            starts = random.sample(range(0, round(len(y) - audio_length)), num)
            return [y[s:s + audio_length] for s in starts]
        if mode == 2:
            # Consecutive windows; the trailing remainder is dropped.
            count = len(y) // audio_length
            return [y[i * audio_length:(i + 1) * audio_length]
                    for i in range(count)]
        raise ValueError(f"mode must be 1 or 2, got {mode!r}")

    def merge(self, r, g, b):
        """Merge three 2-D feature arrays into one RGB image and save it.

        Each array is min-max scaled to [0, 255] and used as one grayscale
        channel.  The file is named by the current count of files already
        in ``img_save_path`` so earlier images are not overwritten.
        """
        # Create the output directory up front; listdir/save on a missing
        # directory would raise FileNotFoundError.
        os.makedirs(self.img_save_path, exist_ok=True)
        n = len(os.listdir(self.img_save_path))  # next free index
        scaler = MinMaxScaler(feature_range=(0, 255))
        channels = tuple(
            Image.fromarray(scaler.fit_transform(c)).convert('L')
            for c in (r, g, b)
        )
        img = Image.merge('RGB', channels)
        # img.show()
        img.save(os.path.join(self.img_save_path, str(n) + '.png'))







if __name__ == '__main__':
    # Cut every wav file under `filepath` into 64*64-sample pieces and save
    # each piece as a 64x64 grayscale PNG for visual verification.
    filepath = r'E:\@MY_code\Voice_Recognize\data\yes_noise_wav'
    # Single source of truth for the output directory (it was previously
    # duplicated as a literal in both the listdir and the save call).
    save_dir = r'E:\@MY_code\Voice_Recognize\data\yes_noise_img_veri'
    zqd = MGPmerge(filepath)
    audio_length = 64 * 64  # one piece fills one 64x64 image exactly
    normalize_tool = MinMaxScaler(feature_range=(0, 255))
    for wav_path in zqd.file_path_list:
        y, sr = librosa.load(wav_path)
        pieces = zqd.cut_pic(100, audio_length, y, mode=2)
        for piece in pieces:
            # Name by the current file count so reruns append rather than
            # overwrite existing images.
            n = len(os.listdir(save_dir))
            scaled = normalize_tool.fit_transform(piece.reshape(64, 64))
            img = Image.fromarray(scaled).convert('L')
            # img.show()
            img.save(os.path.join(save_dir, str(n) + '.png'))




    # for i in zqd.file_path_list:
    #     y,sr = librosa.load(i)
    #     audio_length = round(50*0.01*sr)
    #     pieces = zqd.cut_pic(100,audio_length,y)
    #     for p in pieces:
    #         MFCC = zqd.cal_MFCC(p,sr)
    #         GFCC = zqd.cal_GFCC(p,sr)
    #         PNCC = zqd.cal_PNCC(p,sr)
    #         zqd.merge(MFCC,GFCC,PNCC)
