import torch
import toml, os
from AudioParser import AudioParser
from HyperParm import HyperParm
from skimage.transform import resize
import numpy as np
import random


class AudioDataset(torch.utils.data.Dataset):
    """Dataset of audio files organised in class-named subdirectories.

    Every audio file is split into ``HyperParm.N_SPLIT`` segments and each
    segment is one dataset item (a stacked 3-channel image of mel /
    chroma / spectral-contrast features plus the class label).

    When ``enhanced`` is True the dataset is virtually enlarged by
    ``HyperParm.ENHANCED_RATE``: indices past the real file list are served
    from the same files after a random circular roll of the waveform
    (a cheap augmentation).
    """

    def __init__(self, data_root_dir: str, enhanced: bool = False):
        """Scan ``data_root_dir`` for labelled audio files.

        Args:
            data_root_dir: root directory; must contain ``config.toml``
                with a ``label`` table mapping class-directory names to
                label values.
            enhanced: enable the roll-based augmentation (see class doc).
        """
        super().__init__()
        self.enhanced = enhanced

        self.audioParser = AudioParser()
        config = toml.load(f"{data_root_dir}/config.toml")
        label = config["label"]

        # (file path, label) for every file whose parent directory name is
        # a known class.  BUGFIX: the path previously ended in a literal
        # "(unknown)" and never used `filename`.
        self.data_info = []
        for dirpath, dirnames, filenames in os.walk(data_root_dir):
            for filename in filenames:
                classname = os.path.basename(dirpath)
                if classname in label:
                    self.data_info.append(
                        (f"{dirpath}/{filename}", label[classname])
                    )

        # index -> (audio segment, label).  Filled N_SPLIT entries at a
        # time when a file is loaded; entries are popped once served so
        # the cache does not grow without bound.
        self.buffer = {}

    def __getitem__(self, index):
        """Return ``(features, label)`` for the segment at ``index``.

        ``features`` is a 3-channel array (mel, chroma, spectral
        contrast), each channel resized to ``HyperParm.PIC_SIZE``.
        """
        if index not in self.buffer:
            base_index = index // HyperParm.N_SPLIT
            # Indices beyond the real file list are augmented copies; map
            # them back onto the file list with a modulo so any
            # ENHANCED_RATE (not just 2) stays in range.  For rate == 2
            # this is identical to the previous subtraction.
            is_enhanced = base_index >= len(self.data_info)
            true_index = base_index % len(self.data_info)
            # First buffer key for this (possibly augmented) file.
            base_index *= HyperParm.N_SPLIT

            data_path, label = self.data_info[true_index]
            self.audioParser.load_audio(data_path)

            if is_enhanced and self.enhanced:
                # Random circular shift of the waveform as augmentation.
                # NOTE(review): randrange(1, n) requires the loaded audio
                # to hold at least 2 samples — confirm upstream.
                self.audioParser.roll_audio(
                    random.randrange(1, len(self.audioParser.audio))
                )

            # Cache all N_SPLIT segments of this file under consecutive
            # indices so neighbouring __getitem__ calls reuse the load.
            for i, item in enumerate(self.audioParser.split_audio(HyperParm.N_SPLIT)):
                self.buffer[base_index + i] = (item, label)

        audio, label = self.buffer.pop(index)
        audio_mel = resize(self.audioParser.get_mel(audio), HyperParm.PIC_SIZE)
        audio_chroma = resize(self.audioParser.get_chroma(audio), HyperParm.PIC_SIZE)
        audio_spectral_contrast = resize(
            self.audioParser.get_spectral_contrast(audio), HyperParm.PIC_SIZE
        )
        # Stack as channels-first (3, H, W).
        res = np.stack([audio_mel, audio_chroma, audio_spectral_contrast], axis=0)

        return res, label

    def __len__(self):
        """Segment count, multiplied by ENHANCED_RATE when augmenting."""
        base_len = HyperParm.N_SPLIT * len(self.data_info)
        return base_len * HyperParm.ENHANCED_RATE if self.enhanced else base_len


def test():
    """Smoke test: print every (path, label) pair discovered under ./data.

    Mirrors the scan done by ``AudioDataset.__init__`` so the directory
    layout and ``config.toml`` label table can be checked by eye.
    """
    records = []
    config = toml.load("./data/config.toml")
    label = config["label"]
    for dirpath, dirnames, filenames in os.walk("data"):
        for filename in filenames:
            classname = os.path.basename(dirpath)
            if classname in label:
                # BUGFIX: path previously ended in a literal "(unknown)"
                # and never used `filename`.
                records.append((f"{dirpath}/{filename}", label[classname]))
    print(records)


if __name__ == "__main__":
    # Run the directory-scan smoke test when executed as a script.
    test()
