import os
import random

import torch
import librosa
import numpy as np
import soundfile as sf
from PIL import Image
from scipy.signal import resample
from ssqueezepy import ssq_stft, ssq_cwt, fft
import pydub
import io
import scipy.io.wavfile
import matplotlib.pyplot as plt

from Artear.utils.simpledaw import SimpleDAW


def get_files(directory, ext, with_dot=True):
    path_list = []
    for paths in [[os.path.join(dirpath, name) for name in filenames if
                   name.endswith(('.' if with_dot else '') + ext)] for
                  dirpath, dirnames, filenames in os.walk(directory)]:
        path_list.extend(paths)
    return path_list


class AudioDataset(torch.utils.data.Dataset):
    """Dataset of wav files served as synchrosqueezed-spectrogram tensors.

    Scans config['train_sample_dir'] or config['test_sample_dir'] (depending
    on *mode*) for .wav files; each item is converted on access via
    load_audio_as_spec.
    """

    def __init__(self, mode='train', sub_dir='', max_duration=1., **config):
        """Collect wav paths for the chosen split.

        Args:
            mode: 'train' uses config['train_sample_dir'], anything else
                uses config['test_sample_dir'].
            sub_dir: subdirectory under the sample dir to scan.
            max_duration: clip length in seconds passed to the loader.
            **config: must contain the sample dirs, 'sample_rate' and
                'compress_rate'.
        """
        super(AudioDataset, self).__init__()
        self.task_type = 'audio'

        sample_dir = config['train_sample_dir'] if mode == 'train' else config['test_sample_dir']
        self.data = list(get_files(sample_dir + '/' + sub_dir, 'wav'))

        self.config = config
        self.max_duration = max_duration

    def __getitem__(self, index):
        """Return (spec_tensor, raw_length, raw_sr); spec dims 0 and 2 swapped."""
        spec, raw_length, raw_sr = load_audio_as_spec(
            self.data[index],
            max_duration=self.max_duration,
            sample_rate_=int(self.config['sample_rate']),
            compress_rate=self.config['compress_rate'])
        return torch.transpose(torch.Tensor(spec), 0, 2), raw_length, raw_sr

    def __len__(self):
        """Number of wav files found for this split."""
        return len(self.data)


def segment_to_numpy(sound):
    """Convert a pydub AudioSegment to a float32 (samples, channels) array.

    Each channel's integer samples are scaled by the dtype's max value, so
    the output lies (approximately) in [-1, 1].
    """
    mono_channels = sound.split_to_mono()
    raw_samples = [channel.get_array_of_samples() for channel in mono_channels]
    full_scale = np.iinfo(raw_samples[0].typecode).max
    return np.array(raw_samples).T.astype(np.float32) / full_scale


def numpy_to_segment(fp_arr, sample_rate):
    """Encode a sample array as an in-memory wav and reload it with pydub.

    Args:
        fp_arr: numpy sample array accepted by scipy.io.wavfile.write.
        sample_rate: sample rate in Hz (must be an int for wavfile.write).

    Returns:
        A pydub.AudioSegment containing the same audio.
    """
    buffer = io.BytesIO()
    scipy.io.wavfile.write(buffer, sample_rate, fp_arr)
    buffer.seek(0)
    return pydub.AudioSegment.from_wav(buffer)


def load_audio_as_spec(raw_audio_path, max_duration, sample_rate_, compress_rate, raw=None):
    """Load a wav file and convert it to a 3-plane synchrosqueezed-CWT image.

    The clip is mixed onto a silent pad of ``max_duration`` seconds, the first
    channel is resampled down by ``compress_rate``, and the real part,
    imaginary part and magnitude of its synchrosqueezed CWT are stacked as the
    three planes of the returned spectrogram.

    Args:
        raw_audio_path: path to the wav file to load (also re-read via pydub).
        max_duration: target clip length in seconds.
        sample_rate_: target sample rate in Hz.
        compress_rate: factor by which the time axis is compressed.
        raw: optional pre-loaded sample array; when given, only its length is
            used and ``sample_rate_`` is reported as the source rate.

    Returns:
        (spec, raw_length, raw_sr): spec is a float array with 3 planes on the
        last axis; raw_length is the source sample count; raw_sr the source
        sample rate.
    """
    if raw is None:
        raw, raw_sr = sf.read(raw_audio_path)
    else:
        # Fix: raw_sr used to be undefined on this path, raising NameError at
        # the return statement (load_audio_as_array already handled it).
        raw_sr = sample_rate_
    raw_length = raw.shape[0]

    # Silent segment fixes the output duration / frame rate (acts as padding).
    seg = pydub.AudioSegment.silent(duration=int(max_duration * 1000), frame_rate=sample_rate_)

    comp_length = int(seg.duration_seconds * seg.frame_rate / compress_rate)

    seg1 = pydub.AudioSegment.from_wav(raw_audio_path)[:int(max_duration * 1000)]
    # Fix: pydub segments are immutable -- set_frame_rate()/set_sample_width()
    # return new segments. The originals discarded these results, making the
    # calls no-ops.
    seg1 = seg1.set_frame_rate(seg.frame_rate)
    seg1 = seg1.set_sample_width(seg.sample_width)

    # Mix the clip onto the silent pad, then normalise rate/width again.
    seg = seg.overlay(seg1)
    seg = seg.set_frame_rate(seg1.frame_rate)
    seg = seg.set_sample_width(seg1.sample_width)

    arr = segment_to_numpy(seg)
    # Only the first channel feeds the spectrogram (the old dat_1/dat_2 were
    # computed but never used, so they were dropped).
    dat_0 = resample(arr[:, 0], comp_length)

    # Synchrosqueezed continuous wavelet transform; the complex result is
    # split into real / imaginary / magnitude planes.
    spec_0, *_ = ssq_cwt(dat_0)
    plane_real = np.real(spec_0)
    plane_imag = np.imag(spec_0)
    plane_abs = np.abs(spec_0)

    # Round both target dimensions down to a multiple of 4 (presumably so a
    # downstream net can downsample twice without shape mismatches).
    spec_shape = [spec_0.shape[0] - spec_0.shape[0] % 4,
                  comp_length - comp_length % 4]

    # NOTE(review): PIL resize takes (width, height), so each resized array
    # comes back as (spec_shape[1], spec_shape[0]) -- confirm downstream code
    # expects that orientation (AudioDataset transposes dims 0 and 2).
    planes = []
    for plane in (plane_real, plane_imag, plane_abs):
        img = Image.fromarray(plane).resize(spec_shape)
        planes.append(np.array(img))

    spec = np.stack(planes, axis=-1)
    return spec, raw_length, raw_sr


def load_audio_as_array(raw_audio_path, max_duration, sample_rate_, compress_rate, raw=None):
    """Load (or wrap) audio, pad it to max_duration and return a mono array.

    Args:
        raw_audio_path: wav path to load; pass '' (or None) to use ``raw``.
        max_duration: target clip length in seconds.
        sample_rate_: target sample rate in Hz (also reported as raw_sr when
            ``raw`` is supplied instead of a path).
        compress_rate: factor by which the time axis is compressed.
        raw: optional pre-rendered sample array used when no path is given.

    Returns:
        (mono, raw_length, raw_sr): mono is a 1-D float array of roughly
        ``max_duration * sample_rate_ / compress_rate`` samples; raw_length is
        the source sample count; raw_sr the source sample rate.
    """
    if raw is None:
        raw, raw_sr = sf.read(raw_audio_path)
    else:
        raw_sr = sample_rate_

    raw_length = raw.shape[0]

    # Silent pad defining the fixed output duration / frame rate.
    seg = pydub.AudioSegment.silent(duration=int(max_duration * 1000), frame_rate=sample_rate_)

    comp_length = int(max_duration * sample_rate_ / compress_rate)

    if raw_audio_path:
        seg1 = pydub.AudioSegment.from_wav(raw_audio_path)[:int(max_duration * 1000)]
    elif raw is None:
        seg1 = pydub.AudioSegment.silent(duration=int(max_duration * 1000), frame_rate=sample_rate_)
    else:
        seg1 = numpy_to_segment(raw, int(sample_rate_))

    # Fix: pydub segments are immutable -- set_frame_rate()/set_sample_width()
    # return new segments; the originals discarded these results (no-ops).
    seg1 = seg1.set_frame_rate(seg.frame_rate)
    seg1 = seg1.set_sample_width(seg.sample_width)

    arr = segment_to_numpy(seg)
    arr1 = segment_to_numpy(seg1)
    if arr.shape[0] > arr1.shape[0]:
        # NOTE(review): slicing arr1 to a LARGER length is a no-op (numpy does
        # not pad), so this branch only re-encodes the same samples; confirm
        # whether zero-padding was intended here.
        arr1 = arr1[:arr.shape[0], ...]
        # Fix: cast the possibly-float sample rate to int -- wavfile.write
        # inside numpy_to_segment rejects float rates (matches the cast used
        # in the branch above).
        seg1 = numpy_to_segment(arr1, int(sample_rate_))
    # Overlay the clip onto whichever segment is longer.
    if seg.duration_seconds > seg1.duration_seconds:
        seg = seg.overlay(seg1)
    else:
        seg = seg1.overlay(seg)
    # Fix: the original overlaid seg1 a second time here, unconditionally,
    # summing the clip with itself and doubling its amplitude.
    seg = seg.set_frame_rate(seg1.frame_rate)
    seg = seg.set_sample_width(seg1.sample_width)

    arr = segment_to_numpy(seg)

    dat_0 = resample(arr[:, 0], comp_length)
    if len(arr.shape) == 2 and arr.shape[1] == 2:
        dat_1 = resample(arr[:, 1], comp_length)
    else:
        # Mono source: duplicate the single channel before downmixing.
        dat_1 = dat_0

    return librosa.to_mono(np.stack([dat_0, dat_1], axis=0)), raw_length, raw_sr


class SynthOperationDataset(torch.utils.data.Dataset):
    """Pairs of synth renderings that differ in exactly one parameter.

    Each item is an .fxp preset path. On access, the preset is loaded into the
    shared SimpleDAW and rendered; then the parameter at ``param_index`` is
    replaced with a uniform random value in [0, 1) and the patch is rendered
    again.
    """

    # Default melody rendered through every preset. Generalised from the
    # previously hard-coded constructor value; override via ``midi_path``.
    DEFAULT_MIDI_PATH = r"B:\muse_repo\MIDI\melody\20220608_01.mid"

    def __init__(self, simpledaw, param_index, sample_rate=44100., mode='train',
                 sub_dir='', max_duration=1., midi_path=None, **config):
        """Set up the DAW and collect preset paths for the chosen split.

        Args:
            simpledaw: shared SimpleDAW instance used for rendering.
            param_index: index of the single parameter to randomise.
            sample_rate: render sample rate in Hz.
            mode: 'train' uses config['train_preset_dir'], anything else
                uses config['test_preset_dir'].
            sub_dir: subdirectory under the preset dir to scan.
            max_duration: render length in seconds.
            midi_path: MIDI file to render; None keeps the legacy default.
            **config: must contain the preset dirs and 'compress_rate'.
        """
        super(SynthOperationDataset, self).__init__()

        self.simpledaw = simpledaw
        self.simpledaw.load_midi(midi_path if midi_path is not None else self.DEFAULT_MIDI_PATH)
        # Snapshot of the DAW's default parameters, before any preset is set.
        self.init_params = torch.tensor(self.simpledaw.get_params())
        self.n_params = len(self.init_params)

        self.param_index = param_index

        preset_dir = config['train_preset_dir'] if mode == 'train' else config['test_preset_dir']
        self.data = list(get_files(preset_dir + '/' + sub_dir, 'fxp'))

        self.config = config
        self.sample_rate = sample_rate
        self.max_duration = max_duration

    def __getitem__(self, index):
        """Render a preset twice (original vs one-param mutation).

        Returns:
            (audio1, audio2, params1, rand_index, params_delta, init_params).
        """
        # Presumably get_params(path) reads the preset's parameter values
        # without applying them (hence the explicit set_params that follows).
        params1 = np.array(self.simpledaw.get_params(self.data[index]))
        self.simpledaw.set_params(params1.tolist())
        audio1 = librosa.to_mono(self.simpledaw.render(self.max_duration).transpose(1, 0))

        # Mutate exactly one parameter and render again.
        params2 = params1.copy()
        rand_index = self.param_index
        params2[rand_index] = random.uniform(0, 1)
        self.simpledaw.set_params(params2.tolist())
        audio2 = librosa.to_mono(self.simpledaw.render(self.max_duration).transpose(1, 0))

        params1 = torch.tensor(params1)
        params2 = torch.tensor(params2)
        params_delta = params2 - params1

        # Pad/compress each rendering into a fixed-length mono tensor.
        audio1 = torch.tensor(load_audio_as_array(raw=audio1,
                                                  compress_rate=self.config['compress_rate'],
                                                  max_duration=self.max_duration,
                                                  sample_rate_=self.sample_rate,
                                                  raw_audio_path='')[0])
        audio2 = torch.tensor(load_audio_as_array(raw=audio2,
                                                  compress_rate=self.config['compress_rate'],
                                                  max_duration=self.max_duration,
                                                  sample_rate_=self.sample_rate,
                                                  raw_audio_path='')[0])
        return audio1, audio2, params1, rand_index, params_delta, self.init_params

    def __len__(self):
        """Number of presets found for this split."""
        return len(self.data)


class SynthParamsDataset(torch.utils.data.Dataset):
    """Pairs of renderings: DAW defaults vs a stored .fxp preset.

    Each item is an .fxp preset path; on access the shared SimpleDAW renders
    once with the initial (default) parameters and once with the preset's
    parameters, returning both audios and the parameter delta.
    """

    # Default melody rendered through every preset. Generalised from the
    # previously hard-coded constructor value; override via ``midi_path``.
    DEFAULT_MIDI_PATH = r"B:\muse_repo\MIDI\melody\20220608_01.mid"

    def __init__(self, simpledaw, sample_rate=44100., mode='train', sub_dir='',
                 max_duration=1., midi_path=None, **config):
        """Set up the DAW and collect preset paths for the chosen split.

        Args:
            simpledaw: shared SimpleDAW instance used for rendering.
            sample_rate: render sample rate in Hz.
            mode: 'train' uses config['train_preset_dir'], anything else
                uses config['test_preset_dir'].
            sub_dir: subdirectory under the preset dir to scan.
            max_duration: render length in seconds.
            midi_path: MIDI file to render; None keeps the legacy default.
            **config: must contain the preset dirs and 'compress_rate'.
        """
        super(SynthParamsDataset, self).__init__()

        self.simpledaw = simpledaw
        self.simpledaw.load_midi(midi_path if midi_path is not None else self.DEFAULT_MIDI_PATH)
        # Snapshot of the DAW's default parameters, before any preset is set.
        self.init_params = torch.tensor(self.simpledaw.get_params())
        self.n_params = len(self.init_params)

        preset_dir = config['train_preset_dir'] if mode == 'train' else config['test_preset_dir']
        self.data = list(get_files(preset_dir + '/' + sub_dir, 'fxp'))

        self.config = config
        self.sample_rate = sample_rate
        self.max_duration = max_duration

    def __getitem__(self, index):
        """Render defaults and the indexed preset; return both plus the delta.

        Returns:
            (audio1, audio2, params_delta, init_params).
        """
        # Reference rendering with the initial parameter set.
        self.simpledaw.set_params(self.init_params.numpy().tolist())
        audio1 = librosa.to_mono(self.simpledaw.render(self.max_duration).transpose(1, 0))

        # Load the target preset's parameters and apply them before rendering.
        params2_arr = np.array(self.simpledaw.get_params(self.data[index]))
        # Fix: the preset parameters were fetched but never applied, so audio2
        # was rendered with the same init params as audio1. This mirrors the
        # get_params -> set_params -> render sequence used by
        # SynthOperationDataset.__getitem__.
        self.simpledaw.set_params(params2_arr.tolist())
        audio2 = librosa.to_mono(self.simpledaw.render(self.max_duration).transpose(1, 0))

        params2 = torch.tensor(params2_arr)
        params_delta = params2 - self.init_params

        # Pad/compress each rendering into a fixed-length mono tensor.
        audio1 = torch.tensor(load_audio_as_array(raw=audio1,
                                                  compress_rate=self.config['compress_rate'],
                                                  max_duration=self.max_duration,
                                                  sample_rate_=self.sample_rate,
                                                  raw_audio_path='')[0])
        audio2 = torch.tensor(load_audio_as_array(raw=audio2,
                                                  compress_rate=self.config['compress_rate'],
                                                  max_duration=self.max_duration,
                                                  sample_rate_=self.sample_rate,
                                                  raw_audio_path='')[0])
        return audio1, audio2, params_delta, self.init_params

    def __len__(self):
        """Number of presets found for this split."""
        return len(self.data)
