import os
import torch
from torch.utils.data import Dataset
import torchaudio
import librosa
import numpy as np

class TIMITDataset_(Dataset):
    """TIMIT dataset yielding one (time-averaged MFCC, speaker label) pair per utterance.

    Each item is a single .wav file; the label is the index of the speaker
    directory containing it within ``self.speakers``.
    """

    def __init__(self, root_dir, split="train"):
        """
        Args:
            root_dir: path to the TIMIT root containing TRAIN/ and TEST/.
            split: either "train" or "test".

        Raises:
            ValueError: if ``split`` is neither "train" nor "test".
        """
        if split == "train":
            self.root_dir = os.path.join(root_dir, "TRAIN")
        elif split == "test":
            self.root_dir = os.path.join(root_dir, "TEST")
        else:
            # Previously an unknown split left self.root_dir unset and the
            # listdir below crashed with AttributeError; fail fast instead.
            raise ValueError(f"split must be 'train' or 'test', got {split!r}")
        self.speakers = []       # speaker directory names; list position == label
        self.speaker_wavs = []   # flat list of per-utterance wav paths
        # Walk the dataset layout: root/<dialect region>/<speaker>/*.wav
        for dr in os.listdir(self.root_dir):
            dr_path = os.path.join(self.root_dir, dr)
            if not os.path.isdir(dr_path):
                continue
            for speaker in os.listdir(dr_path):
                speaker_path = os.path.join(dr_path, speaker)
                # Only register actual speaker directories. The original code
                # appended every entry (including stray files) to
                # self.speakers, which would shift the labels of all
                # subsequent speakers.
                if not os.path.isdir(speaker_path):
                    continue
                self.speakers.append(speaker)
                for wav_file in os.listdir(speaker_path):
                    # Only load the individual utterances, not the merged
                    # ones, so they can be used for training separately.
                    if wav_file.endswith(".wav") and not wav_file.startswith("merge"):
                        self.speaker_wavs.append(os.path.join(speaker_path, wav_file))

    def __len__(self):
        """Return the number of individual utterances."""
        return len(self.speaker_wavs)

    @staticmethod
    def _speaker_name(wav_path):
        """Return the speaker (parent-directory) name for a wav path.

        Uses os.path instead of splitting on a literal backslash so the
        lookup works on any operating system.
        """
        return os.path.basename(os.path.dirname(wav_path))

    def __getitem__(self, index):
        """Load one utterance and return (mean MFCC vector, speaker label)."""
        wav_path = self.speaker_wavs[index]
        # BUG FIX: the original split the path on '\\', which only works
        # with Windows separators even though paths were built portably
        # with os.path.join.
        cls_name = self._speaker_name(wav_path)
        label = self.speakers.index(cls_name)

        # Load the speaker's audio file and extract MFCC features,
        # averaged over time into a single feature vector.
        waveform, sample_rate = torchaudio.load(wav_path)
        mfcc = self.extract_mfcc(waveform, sample_rate)
        # (1, n_mfcc, frames) -> (n_mfcc, frames) -> (n_mfcc,)
        # assumes mono audio so squeeze() drops only the channel dim — TODO confirm
        mfcc = torch.tensor(mfcc).squeeze().mean(axis=1)

        return mfcc, label

    def extract_mfcc(self, wav, sample_rate):
        """Compute an MFCC matrix for a waveform.

        Args:
            wav: waveform tensor/array of shape (channels, samples).
            sample_rate: sampling rate in Hz.

        Returns:
            numpy array of MFCCs as produced by librosa (channel-major).
        """
        # MFCC extraction parameters.
        n_mfcc = 40
        n_fft = 512
        hop_length = 128
        n_mels = 128

        wav = np.array(wav)
        mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=n_mfcc,
                                    n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)

        return mfcc
        
class TIMITDataset(Dataset):
    """TIMIT dataset grouped by speaker.

    One item per speaker: all of that speaker's individual (non-merged)
    wav files, their MFCC features, and the speaker's integer label.
    """

    def __init__(self, root_dir, split="train"):
        if split == "train":
            self.root_dir = os.path.join(root_dir, "TRAIN")
        elif split == "test":
            self.root_dir = os.path.join(root_dir, "TEST")
        self.speakers = []
        self.labels = []
        self.speaker_wavs = []
        # Scan root/<dialect region>/<speaker>/*.wav and collect the wav
        # paths belonging to each speaker.
        for region in os.listdir(self.root_dir):
            region_path = os.path.join(self.root_dir, region)
            if not os.path.isdir(region_path):
                continue
            for spk in os.listdir(region_path):
                spk_dir = os.path.join(region_path, spk)
                if not os.path.isdir(spk_dir):
                    continue
                # Keep only the individual utterances; "merge*" files are
                # the concatenated versions and are skipped so each clip
                # can be used for training on its own.
                wav_paths = [
                    os.path.join(spk_dir, fname)
                    for fname in os.listdir(spk_dir)
                    if fname.endswith(".wav") and not fname.startswith("merge")
                ]
                if wav_paths:
                    self.speakers.append(spk)
                    # Each speaker maps to a list of wav paths.
                    self.speaker_wavs.append(wav_paths)
                    # The label is the speaker's position in the list.
                    self.labels.append(len(self.speakers) - 1)

    def __len__(self):
        # One item per speaker, not per utterance.
        return len(self.speakers)

    def __getitem__(self, index):
        wav_paths = self.speaker_wavs[index]
        label = self.labels[index]

        # Load every utterance of this speaker and extract its MFCCs.
        wavs, mfccs = [], []
        for path in wav_paths:
            waveform, sample_rate = torchaudio.load(path)
            mfccs.append(torch.tensor(self.extract_mfcc(waveform, sample_rate)))
            wavs.append(waveform)

        return wavs, mfccs, label

    def extract_mfcc(self, wav, sample_rate):
        """Compute MFCC features, returned time-major.

        Assumes a single-channel waveform so the leading channel axis can
        be squeezed away — TODO confirm against the loaded files.
        """
        # MFCC extraction parameters.
        params = dict(n_mfcc=24, n_fft=512, hop_length=128, n_mels=128)

        features = librosa.feature.mfcc(y=np.array(wav), sr=sample_rate, **params)
        # (channels, n_mfcc, frames) -> (channels, frames, n_mfcc) -> drop channel dim
        return np.transpose(features, (0, 2, 1)).squeeze(0)
