import os

import librosa
import numpy as np
import paddle
import soundfile as sf
from PIL.Image import Image
from paddle.io import Dataset
from scipy.signal import resample
from ssqueezepy import ssq_stft, ssq_cwt

from utils import simpledaw


def get_files(directory, ext, with_dot=True):
    """Recursively collect file paths under ``directory`` that end in ``ext``.

    Args:
        directory: Root directory to walk.
        ext: File extension to match (e.g. ``'wav'``).
        with_dot: When True (default), match ``'.ext'``; otherwise match the
            bare suffix ``ext``.

    Returns:
        list[str]: Matching paths with ``/`` replaced by ``\\`` (the module
        normalises to Windows-style separators everywhere).
    """
    suffix = ('.' if with_dot else '') + ext
    matches = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(dirpath, filename).replace('/', '\\'))
    return matches


def resize(spec_):
    """Trim a 2-D spectrogram so both dimensions are even.

    If a dimension is odd, the array is resampled (via PIL) to that
    dimension minus one; even dimensions are left untouched.

    Args:
        spec_: 2-D numpy array (rows, cols).

    Returns:
        np.ndarray: Array whose shape has only even dimensions.
    """
    # The module-level `from PIL.Image import Image` binds the Image *class*,
    # which has no `fromarray`; import the PIL.Image module locally instead.
    from PIL import Image

    if spec_.shape[0] % 2 != 0:
        # PIL's resize takes (width, height) while numpy shape is (rows, cols),
        # so the target (rows - 1, cols) maps to PIL size (cols, rows - 1).
        # The original passed array-order sizes, which transposed the output.
        img_ = Image.fromarray(spec_).resize((spec_.shape[1], spec_.shape[0] - 1))
        spec_ = np.array(img_)
    if spec_.shape[1] % 2 != 0:
        # Target (rows, cols - 1) -> PIL size (cols - 1, rows).
        img_ = Image.fromarray(spec_).resize((spec_.shape[1] - 1, spec_.shape[0]))
        spec_ = np.array(img_)
    return spec_


def load_audio_as_spec(raw_audio_path, shape, compress_rate):
    """Load a stereo wav file and build a 3-channel spectrogram stack.

    Channel 0/1: synchrosqueezed STFT magnitude of the left/right channel.
    Channel 2:   synchrosqueezed CWT magnitude of the mono mixdown.
    Each spectrogram is resized to ``shape`` and the three are stacked on
    the last axis (image-like HxWx3 output).

    Args:
        raw_audio_path: Path to a stereo audio file readable by soundfile.
        shape: Target PIL size ``(width, height)`` for each spectrogram.
        compress_rate: Downsampling factor applied to the waveform before
            the transforms.

    Returns:
        np.ndarray: Stacked spectrograms, shape ``(*shape reversed*, 3)``.
    """
    # Module-level `from PIL.Image import Image` binds the Image class, which
    # lacks `fromarray`; import the PIL.Image module locally instead.
    from PIL import Image

    # sf.read returns a (data, samplerate) tuple; the original indexed the
    # tuple directly (`raw[:, 0]`), which raises TypeError.
    raw, _sr = sf.read(raw_audio_path)

    # For stereo input raw has shape (frames, channels); shape[-2] is frames.
    n_out = int(raw.shape[-2] / compress_rate)
    dat_0 = resample(raw[:, 0], n_out)
    dat_1 = resample(raw[:, 1], n_out)
    # librosa.to_mono averages over axis=-2 (channels-first layout), while
    # soundfile yields frames-first — transpose before mixing down.
    dat_2 = resample(librosa.to_mono(raw.T), n_out)

    spec_0, *_ = ssq_stft(dat_0)
    spec_1, *_ = ssq_stft(dat_1)
    spec_2, *_ = ssq_cwt(dat_2)

    def _magnitude_resized(spec):
        # Magnitude spectrogram, resampled to the requested PIL size.
        return np.array(Image.fromarray(np.abs(spec)).resize(shape))

    return np.stack([_magnitude_resized(spec_0),
                     _magnitude_resized(spec_1),
                     _magnitude_resized(spec_2)], axis=-1)


class Audio2ParamsDataset(Dataset):
    """Dataset mapping rendered audio to synthesizer preset parameters.

    Samples are wav files; labels are preset files decoded to parameter
    vectors by a SimpleDAW plugin host.
    """

    def __init__(self, mode='train', **config):
        """Collect (sample, label) path pairs for the given split.

        Args:
            mode: ``'train'`` selects the train_* config dirs; anything
                else selects the test_* dirs.
            **config: Must provide ``{train,test}_sample_dir``,
                ``{train,test}_label_dir``, ``preset_file_format``,
                ``plugin_path`` and ``compress_rate``.
        """
        super(Audio2ParamsDataset, self).__init__()
        self.task_type = 'audio2params'

        # The train and test branches were verbatim duplicates; fold them.
        split = 'train' if mode == 'train' else 'test'
        samples = get_files(config[split + '_sample_dir'], 'wav')
        labels = get_files(config[split + '_label_dir'], config['preset_file_format'])
        # NOTE(review): pairing assumes os.walk enumerates both trees in the
        # same relative order — verify sample/label filenames correspond.
        self.data = list(zip(samples, labels))

        self.simple_daw = simpledaw.SimpleDAW(config['plugin_path'])
        self.config = config

    def __getitem__(self, index):
        """Return (spectrogram, parameter tensor) for one sample."""
        sample_path, label_path = self.data[index]
        spec = load_audio_as_spec(sample_path, shape=(224, 224),
                                  compress_rate=self.config['compress_rate'])
        # paddle.Tensor is not the public constructor for building tensors
        # from data; paddle.to_tensor is the documented API.
        label = paddle.to_tensor(self.simple_daw.get_params(label_path))
        return spec, label

    def __len__(self):
        return len(self.data)


class Piece2AudioDataset(Dataset):
    """Dataset mapping musical pieces (MIDI + FX arrays) to audio spectrograms.

    Samples are ``.npz`` archives with ``midi`` and ``fx`` arrays; labels
    are wav files converted to 3-channel spectrograms.
    """

    def __init__(self, mode='train', **config):
        """Collect (sample, label) path pairs for the given split.

        Args:
            mode: ``'train'`` selects the train_* config dirs; anything
                else selects the test_* dirs.
            **config: Must provide ``{train,test}_sample_dir``,
                ``{train,test}_label_dir`` and ``compress_rate``.
        """
        super(Piece2AudioDataset, self).__init__()
        self.task_type = 'piece2audio'

        # The train and test branches were verbatim duplicates; fold them.
        split = 'train' if mode == 'train' else 'test'
        samples = get_files(config[split + '_sample_dir'], 'npz')
        labels = get_files(config[split + '_label_dir'], 'wav')
        # NOTE(review): pairing assumes both directory trees enumerate in
        # the same relative order — verify filenames correspond.
        self.data = list(zip(samples, labels))

        self.config = config

    def __getitem__(self, index):
        """Return ((midi, fx), spectrogram) for one piece."""
        piece_path, audio_path = self.data[index]
        piece = np.load(piece_path)
        spec = load_audio_as_spec(audio_path, shape=(224, 224),
                                  compress_rate=self.config['compress_rate'])
        return (piece['midi'], piece['fx']), spec

    def __len__(self):
        return len(self.data)


class Audio2AudioDataset(Dataset):
    """Dataset of (sample wav path, label wav path) pairs.

    No audio is decoded here; ``__getitem__`` returns raw file paths and
    leaves loading to downstream code.
    """

    def __init__(self, mode='train', **config):
        """Collect (sample, label) path pairs for the given split.

        Args:
            mode: ``'train'`` selects the train_* config dirs; anything
                else selects the test_* dirs.
            **config: Must provide ``{train,test}_sample_dir`` and
                ``{train,test}_label_dir``.
        """
        super(Audio2AudioDataset, self).__init__()
        self.task_type = 'audio2audio'

        # The train and test branches were verbatim duplicates; fold them.
        split = 'train' if mode == 'train' else 'test'
        samples = get_files(config[split + '_sample_dir'], 'wav')
        labels = get_files(config[split + '_label_dir'], 'wav')
        # NOTE(review): pairing assumes both trees enumerate in the same
        # relative order — verify filenames correspond.
        self.data = list(zip(samples, labels))

    def __getitem__(self, index):
        """Return the (sample_path, label_path) tuple at ``index``."""
        return self.data[index]

    def __len__(self):
        return len(self.data)


class EEG2MIDIDataset(Dataset):
    """Dataset of (EEG recording path, MIDI file path) pairs.

    No decoding happens here; ``__getitem__`` returns raw file paths and
    leaves loading to downstream code.
    """

    def __init__(self, mode='train', **config):
        """Collect (sample, label) path pairs for the given split.

        Args:
            mode: ``'train'`` selects the train_* config dirs; anything
                else selects the test_* dirs.
            **config: Must provide ``{train,test}_sample_dir``,
                ``{train,test}_label_dir`` and ``eeg_file_format``.
        """
        super(EEG2MIDIDataset, self).__init__()
        self.task_type = 'eeg2midi'

        # The train and test branches were verbatim duplicates; fold them.
        split = 'train' if mode == 'train' else 'test'
        samples = get_files(config[split + '_sample_dir'], config['eeg_file_format'])
        labels = get_files(config[split + '_label_dir'], 'mid')
        # NOTE(review): pairing assumes both trees enumerate in the same
        # relative order — verify filenames correspond.
        self.data = list(zip(samples, labels))

    def __getitem__(self, index):
        """Return the (eeg_path, midi_path) tuple at ``index``."""
        return self.data[index]

    def __len__(self):
        return len(self.data)


if __name__ == '__main__':
    # Stub entry point: selects a dataset class but never instantiates or
    # exercises it — presumably a placeholder for manual testing/debugging.
    dataset_class = Piece2AudioDataset
