import os
import torch
import librosa
import numpy as np
import soundfile as sf
from PIL import Image
from scipy.signal import resample
from ssqueezepy import ssq_stft, ssq_cwt


def get_files(directory, ext, with_dot=True):
    """Recursively collect paths of files under *directory* whose names end
    with *ext*.

    Args:
        directory: root directory to walk.
        ext: file extension to match (without the dot).
        with_dot: when True (default) match ``.ext``; when False match the
            bare suffix ``ext``.

    Returns:
        List of matching file paths, in ``os.walk`` traversal order.
    """
    suffix = ('.' + ext) if with_dot else ext
    matches = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if filename.endswith(suffix):
                matches.append(os.path.join(dirpath, filename))
    return matches


class ParamsDataset(torch.utils.data.Dataset):
    """Dataset of synthesis-parameter vectors read from a ``params.txt`` file.

    Each line of ``<label_dir>/params.txt`` is a comma-separated list of
    floats forming one parameter vector.

    Keyword config:
        train_label_dir / test_label_dir: directory containing ``params.txt``;
            ``train_label_dir`` is used when *mode* == 'train', otherwise
            ``test_label_dir``.
    """

    def __init__(self, mode='train', **config):
        super(ParamsDataset, self).__init__()
        self.task_type = 'params'

        # The train and test branches only differ in the source directory,
        # so select it once and share the parsing code.
        label_dir = config['train_label_dir'] if mode == 'train' else config['test_label_dir']
        with open(label_dir + '/params.txt', 'rt') as file:
            labels = [[float(text) for text in line.strip().split(',')]
                      for line in file]

        self.data = labels
        # Width of one parameter vector. Assumes the file is non-empty and
        # every line has the same number of fields.
        self.n_params = len(labels[0])
        self.config = config

    def __getitem__(self, index):
        # Returned as a 1-tuple so callers can unpack uniformly with the
        # multi-output datasets in this module.
        params = self.data[index]
        return torch.Tensor(params),

    def __len__(self):
        return len(self.data)


def load_audio_as_spec(raw_audio_path, sample_rate, compress_rate, raw=None):
    """Load a stereo wav file and return a 3-channel spectrogram "image".

    The three channels are the magnitudes of the synchrosqueezed CWT of the
    left channel, the right channel, and the mono mixdown, each resized to a
    common shape and stacked along the last axis.

    Args:
        raw_audio_path: path to a stereo wav file (ignored when *raw* is given).
        sample_rate: nominal sample rate; overwritten with the file's actual
            rate when the file is read. Not otherwise used by this function.
        compress_rate: temporal downsampling factor applied before the CWT.
        raw: optional pre-loaded sample array of shape (n_frames, 2).

    Returns:
        np.ndarray of shape (H, W, 3) holding non-negative magnitudes.
    """
    if raw is None:
        raw, sample_rate = sf.read(raw_audio_path)

    # Downsample each signal by compress_rate. sf.read yields
    # (n_frames, n_channels), but librosa.to_mono expects channels-first, so
    # the array must be transposed before mixing down (passing `raw` directly
    # would average over frames instead of channels).
    target_len = int(raw.shape[-2] / compress_rate)
    dat_0 = resample(raw[:, 0], target_len)
    dat_1 = resample(raw[:, 1], target_len)
    dat_2 = resample(librosa.to_mono(raw.T), target_len)

    # ssq_cwt returns (Tx, Wx, ...): Tx is the synchrosqueezed transform,
    # Wx the plain CWT.
    spec_0, *_ = ssq_cwt(dat_0)
    spec_1, *_ = ssq_cwt(dat_1)
    # NOTE(review): this channel uses Wx ([1]) while the two above use Tx —
    # confirm the mixed transforms are intentional.
    spec_2 = ssq_cwt(dat_2)[1]

    spec_0 = np.abs(spec_0)
    spec_1 = np.abs(spec_1)
    spec_2 = np.abs(spec_2)

    def _to_common_size(spec):
        # Resize a magnitude array to spec_0's size so the three channels
        # stack cleanly.
        # NOTE(review): PIL.Image.resize takes (width, height), so passing the
        # numpy (rows, cols) shape transposes non-square spectrograms; kept
        # as-is because all three channels are transformed consistently.
        img = Image.fromarray(spec)
        return np.array(img.resize(spec_0.shape))

    return np.stack([_to_common_size(spec_0),
                     _to_common_size(spec_1),
                     _to_common_size(spec_2)], axis=-1)


class Audio2ParamsDataset(torch.utils.data.Dataset):
    """Dataset pairing wav files with parameter vectors for audio→params
    regression.

    Keyword config:
        train_sample_dir / test_sample_dir: directory tree of .wav samples.
        train_label_dir / test_label_dir: directory containing ``params.txt``
            (one comma-separated float vector per line).
        compress_rate: temporal downsampling factor for spectrogram loading.
        sample_rate: optional nominal sample rate (default 44100).
    """

    def __init__(self, mode='train', **config):
        super(Audio2ParamsDataset, self).__init__()
        self.task_type = 'audio2params'

        # Both splits share the same logic; only the directories differ.
        if mode == 'train':
            sample_dir, label_dir = config['train_sample_dir'], config['train_label_dir']
        else:
            sample_dir, label_dir = config['test_sample_dir'], config['test_label_dir']

        samples = get_files(sample_dir, 'wav')
        with open(label_dir + '/params.txt', 'rt') as file:
            labels = [[float(text) for text in line.strip().split(',')]
                      for line in file]

        # NOTE(review): get_files returns files in os.walk order, which is not
        # guaranteed to match the line order of params.txt — confirm samples
        # are enumerated in label order (zip also silently truncates to the
        # shorter of the two sequences).
        self.data = list(zip(samples, labels))
        # Width of one parameter vector (assumes a non-empty label file).
        self.n_params = len(labels[0])
        self.config = config

    def __getitem__(self, index):
        sample_path, params = self.data[index]
        # Sample rate defaults to the historical hard-coded 44.1 kHz but can
        # now be overridden via config['sample_rate'].
        spec = load_audio_as_spec(sample_path,
                                  sample_rate=self.config.get('sample_rate', 44100),
                                  compress_rate=self.config['compress_rate'])
        return torch.Tensor(spec), torch.Tensor(params)

    def __len__(self):
        return len(self.data)
    
    
class AALDataset(torch.utils.data.Dataset):
    """Dataset of (audio, audio, label) triples for pairwise audio tasks.

    Expects ``<sample_dir>/x1/<i>.wav`` and ``<sample_dir>/x2/<i>.wav`` for
    every line *i* of ``<label_dir>/labels.txt`` (one float label per line).

    Keyword config:
        train_sample_dir / test_sample_dir: directory holding x1/ and x2/.
        train_label_dir / test_label_dir: directory containing ``labels.txt``.
        compress_rate: temporal downsampling factor for spectrogram loading.
    """

    def __init__(self, mode='train', **config):
        super(AALDataset, self).__init__()
        self.task_type = 'audio-audio-label'

        # Both splits share the same logic; only the directories differ.
        # (The original get_files() directory scans were dead code — their
        # results were never used — and have been removed.)
        if mode == 'train':
            sample_dir, label_dir = config['train_sample_dir'], config['train_label_dir']
        else:
            sample_dir, label_dir = config['test_sample_dir'], config['test_label_dir']

        with open(label_dir + '/labels.txt', 'rt') as file:
            labels = [float(line.strip()) for line in file]

        # Paths are derived from the label index, so the pairing with labels
        # is deterministic (independent of directory listing order).
        self.data = [(sample_dir + '/x1/{}.wav'.format(i),
                      sample_dir + '/x2/{}.wav'.format(i),
                      label)
                     for i, label in enumerate(labels)]
        # NOTE(review): unlike the sibling datasets, this is the number of
        # samples rather than a per-sample parameter width — kept as-is for
        # backward compatibility, but confirm downstream usage.
        self.n_params = len(labels)
        self.config = config

    def __getitem__(self, index):
        path1, path2, label = self.data[index]
        spec1 = torch.Tensor(load_audio_as_spec(
            path1, sample_rate=44100, compress_rate=self.config['compress_rate']))
        spec2 = torch.Tensor(load_audio_as_spec(
            path2, sample_rate=44100, compress_rate=self.config['compress_rate']))
        return spec1, spec2, torch.Tensor([label])

    def __len__(self):
        return len(self.data)
    