import os
import random

import librosa
import numpy as np

from skimage import io
from skimage import transform
import matplotlib.pyplot as plt
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
from torchvision.utils import make_grid

# import fnmatch

# Root directory of the raw VoxCeleb1 dev wav files (Windows path — adjust per machine).
datapath = 'D:\\vox1_dev_wave\\wav'
# List files produced by get_data_list(); each line is "<wav_path>\t<label>".
txtpath_train = './data/train_list.txt'
txtpath_test = './data/test_list.txt'


class AudioDataset1(Dataset):
    """Dataset yielding (mel_spectrogram, label) pairs (win_length=256).

    Each item is a fixed-length clip of ``dimension`` samples: longer
    recordings are randomly cropped, shorter ones zero-padded.
    """

    def __init__(self, txtpath, sr=16000, dimension=32640):
        """
        Args:
            txtpath: list file; each line is "<wav_path> <label>".
            sr: target sample rate passed to librosa.load.
            dimension: fixed clip length in samples.
        """
        self.txtpath = txtpath
        self.sr = sr
        self.dim = dimension

        # Build the (wav path, label) list; use a context manager so the
        # file handle is closed even if a line is malformed.
        wav_list = []
        with open(txtpath, 'r') as datainfo:
            for line in datainfo:
                words = line.strip('\n').split()
                wav_list.append((words[0], words[1]))
        self.wav_list = wav_list

    def __getitem__(self, item):
        filename, label = self.wav_list[item]
        wb_wav, sr = librosa.load(filename, sr=self.sr)

        # Crop a random self.dim-sample window, or zero-pad shorter clips.
        if len(wb_wav) >= self.dim:
            # Upper bound is exclusive; the +1 keeps randint valid when
            # len(wb_wav) == self.dim (randint(0, 0) would raise ValueError).
            audio_start = np.random.randint(0, len(wb_wav) - self.dim + 1)
            wb_wav = wb_wav[audio_start: audio_start + self.dim]
        else:
            wb_wav = np.pad(wb_wav, (0, self.dim - len(wb_wav)), "constant")

        mel = librosa.feature.melspectrogram(y=wb_wav, sr=self.sr, hop_length=256, win_length=256)
        return mel, label

    def __len__(self):
        # Total number of audio files listed.
        return len(self.wav_list)


class AudioDataset2(Dataset):
    """Dataset yielding (mel_spectrogram, label) pairs (win_length=512).

    Each item is a fixed-length clip of ``dimension`` samples: longer
    recordings are randomly cropped, shorter ones zero-padded.
    """

    def __init__(self, txtpath, sr=16000, dimension=32640):
        """
        Args:
            txtpath: list file; each line is "<wav_path> <label>".
            sr: target sample rate passed to librosa.load.
            dimension: fixed clip length in samples.
        """
        self.txtpath = txtpath
        self.sr = sr
        self.dim = dimension

        # Build the (wav path, label) list; use a context manager so the
        # file handle is closed even if a line is malformed.
        wav_list = []
        with open(txtpath, 'r') as datainfo:
            for line in datainfo:
                words = line.strip('\n').split()
                wav_list.append((words[0], words[1]))
        self.wav_list = wav_list

    def __getitem__(self, item):
        filename, label = self.wav_list[item]
        wb_wav, sr = librosa.load(filename, sr=self.sr)

        # Crop a random self.dim-sample window, or zero-pad shorter clips.
        if len(wb_wav) >= self.dim:
            # Upper bound is exclusive; the +1 keeps randint valid when
            # len(wb_wav) == self.dim (randint(0, 0) would raise ValueError).
            audio_start = np.random.randint(0, len(wb_wav) - self.dim + 1)
            wb_wav = wb_wav[audio_start: audio_start + self.dim]
        else:
            wb_wav = np.pad(wb_wav, (0, self.dim - len(wb_wav)), "constant")

        mel = librosa.feature.melspectrogram(y=wb_wav, sr=self.sr, hop_length=256, win_length=512)
        return mel, label

    def __len__(self):
        # Total number of audio files listed.
        return len(self.wav_list)


class AudioDataset3(Dataset):
    """Dataset yielding (mel_spectrogram, label) pairs (win_length=1024).

    Each item is a fixed-length clip of ``dimension`` samples: longer
    recordings are randomly cropped, shorter ones zero-padded.
    """

    def __init__(self, txtpath, sr=16000, dimension=32640):
        """
        Args:
            txtpath: list file; each line is "<wav_path> <label>".
            sr: target sample rate passed to librosa.load.
            dimension: fixed clip length in samples.
        """
        self.txtpath = txtpath
        self.sr = sr
        self.dim = dimension

        # Build the (wav path, label) list; use a context manager so the
        # file handle is closed even if a line is malformed.
        wav_list = []
        with open(txtpath, 'r') as datainfo:
            for line in datainfo:
                words = line.strip('\n').split()
                wav_list.append((words[0], words[1]))
        self.wav_list = wav_list

    def __getitem__(self, item):
        filename, label = self.wav_list[item]
        wb_wav, sr = librosa.load(filename, sr=self.sr)

        # Crop a random self.dim-sample window, or zero-pad shorter clips.
        if len(wb_wav) >= self.dim:
            # Upper bound is exclusive; the +1 keeps randint valid when
            # len(wb_wav) == self.dim (randint(0, 0) would raise ValueError).
            audio_start = np.random.randint(0, len(wb_wav) - self.dim + 1)
            wb_wav = wb_wav[audio_start: audio_start + self.dim]
        else:
            wb_wav = np.pad(wb_wav, (0, self.dim - len(wb_wav)), "constant")

        mel = librosa.feature.melspectrogram(y=wb_wav, sr=self.sr, hop_length=256, win_length=1024)
        return mel, label

    def __len__(self):
        # Total number of audio files listed.
        return len(self.wav_list)


class AudioDataset4(Dataset):
    """Dataset yielding (mel_spectrogram, label) pairs (win_length=2048).

    Each item is a fixed-length clip of ``dimension`` samples: longer
    recordings are randomly cropped, shorter ones zero-padded.
    """

    def __init__(self, txtpath, sr=16000, dimension=32640):
        """
        Args:
            txtpath: list file; each line is "<wav_path> <label>".
            sr: target sample rate passed to librosa.load.
            dimension: fixed clip length in samples.
        """
        self.txtpath = txtpath
        self.sr = sr
        self.dim = dimension

        # Build the (wav path, label) list; use a context manager so the
        # file handle is closed even if a line is malformed.
        wav_list = []
        with open(txtpath, 'r') as datainfo:
            for line in datainfo:
                words = line.strip('\n').split()
                wav_list.append((words[0], words[1]))
        self.wav_list = wav_list

    def __getitem__(self, item):
        filename, label = self.wav_list[item]
        wb_wav, sr = librosa.load(filename, sr=self.sr)

        # Crop a random self.dim-sample window, or zero-pad shorter clips.
        if len(wb_wav) >= self.dim:
            # Upper bound is exclusive; the +1 keeps randint valid when
            # len(wb_wav) == self.dim (randint(0, 0) would raise ValueError).
            audio_start = np.random.randint(0, len(wb_wav) - self.dim + 1)
            wb_wav = wb_wav[audio_start: audio_start + self.dim]
        else:
            wb_wav = np.pad(wb_wav, (0, self.dim - len(wb_wav)), "constant")

        mel = librosa.feature.melspectrogram(y=wb_wav, sr=self.sr, hop_length=256, win_length=2048)
        return mel, label

    def __len__(self):
        # Total number of audio files listed.
        return len(self.wav_list)


# 生成数据列表
# Generate the train/test data list files.
def get_data_list(audio_path, list_path):
    """Scan ``audio_path`` for .wav files and write train/test list files.

    Every file whose scan index is a multiple of 10 goes to
    ``test_list.txt``; the rest go to ``train_list.txt``. Each line is
    "<path>\\t<label>", where the label is a 0-based id assigned to the
    15-character speaker prefix of the filename. At most 360 files are
    listed (the historical cap of the original implementation).

    Args:
        audio_path: directory containing the wav files.
        list_path: directory where the two list files are written.
    """
    files = os.listdir(audio_path)
    # Distinct 15-char speaker prefixes seen so far; len(s) - 1 is the
    # label of the most recently seen speaker.
    s = set()
    sound_sum = 0
    # Context managers guarantee both list files are closed even if a
    # write fails part-way through.
    with open(os.path.join(list_path, 'train_list.txt'), 'w') as f_train, \
            open(os.path.join(list_path, 'test_list.txt'), 'w') as f_test:
        for file in files:
            if '.wav' not in file:
                continue
            s.add(file[:15])
            sound_path = os.path.join(audio_path, file)
            # 1-in-10 split: index 0, 10, 20, ... goes to the test list.
            target = f_test if sound_sum % 10 == 0 else f_train
            target.write('%s\t%d\n' % (sound_path.replace('\\', '/'), len(s) - 1))
            if sound_sum >= 359:
                break
            sound_sum += 1


if __name__ == '__main__':
    # Build data/train_list.txt and data/test_list.txt from the raw wav
    # directory. The AudioDataset1..4 classes consume those list files
    # (typically via torch DataLoader) — see the class docstrings.
    get_data_list(datapath, 'data')
