from artear2_7.samplernn.model import SampleRNN, Predictor, Generator

from torch.nn.functional import hardtanh

import artear2_7.samplernn.utils as utils

import torch
from torch.utils.data import (
    Dataset, DataLoader as DataLoaderBase
)

from librosa.core import load
from natsort import natsorted

from os import listdir
from os.path import join

import numpy as np
import scipy.io.wavfile
import io
import pydub
import soundfile as sf
from scipy.signal import resample
import librosa
import sounddevice as sd

from artear2_7.samplernn.model import sequence_nll_loss_bits


# Default hyper-parameters, unpacked into main() as keyword arguments at the
# bottom of this file.  NOTE(review): several "training parameters" below
# (keep_old_checkpoints, datasets_path, results_path, epoch_limit, resume,
# sample_rate, n_samples, sample_length, loss_smoothing, comet_key) are never
# read by main() as written — presumably retained from the original
# samplernn-pytorch config; verify before relying on them.
default_params = {
    # model parameters
    'n_rnn': 1,              # forwarded to SampleRNN(n_rnn=...)
    'dim': 1024,             # forwarded to SampleRNN(dim=...)
    'learn_h0': True,        # forwarded to SampleRNN(learn_h0=...)
    'q_levels': 256,         # quantization levels; used by SampleRNN and FolderDataset
    'seq_len': 22050,        # chunk length handed to the custom DataLoader
    'weight_norm': True,     # forwarded to SampleRNN(weight_norm=...)
    'batch_size': 1,         # forwarded to the custom DataLoader
    'val_frac': 0.1,         # used to compute val_split in main() (result unused)
    'test_frac': 0.1,        # used to compute test_split in main() (result unused)

    # training parameters
    'keep_old_checkpoints': False,
    'datasets_path': 'datasets',
    'results_path': 'results',
    'epoch_limit': 1000,
    'resume': True,
    'sample_rate': 16000,
    'n_samples': 1,
    'sample_length': 80000,
    'loss_smoothing': 0.99,
    'cuda': True,            # when True, model and predictor are moved to GPU
    'comet_key': None,

    'frame_sizes': [10],     # forwarded to SampleRNN(frame_sizes=...)
}


def segment_to_numpy(sound):
    """Convert a pydub ``AudioSegment`` to a float32 numpy array.

    Returns an array of shape ``(n_samples, n_channels)`` with values scaled
    to roughly [-1, 1] by dividing by the max value of the sample typecode.
    """
    mono_channels = sound.split_to_mono()
    raw_samples = [channel.get_array_of_samples() for channel in mono_channels]
    full_scale = np.iinfo(raw_samples[0].typecode).max
    stacked = np.array(raw_samples).T.astype(np.float32)
    return stacked / full_scale


def numpy_to_segment(fp_arr, sample_rate):
    """Encode a numpy sample array as an in-memory WAV and reload it as a
    pydub ``AudioSegment`` at the given sample rate."""
    buffer = io.BytesIO()
    scipy.io.wavfile.write(buffer, sample_rate, fp_arr)
    buffer.seek(0)
    return pydub.AudioSegment.from_wav(buffer)


def load_audio_as_array(raw_audio_path, max_duration, sample_rate_, compress_rate, raw=None):
    """Load a wav file, pad/trim it to ``max_duration`` seconds, downmix it to
    mono and time-compress it by ``compress_rate``.

    Parameters
    ----------
    raw_audio_path : str
        Path to the wav file; always read via pydub below.
    max_duration : float
        Target duration in seconds for the output segment.
    sample_rate_ : int
        Frame rate used for the silent carrier segment and the metadata
        fallback when ``raw`` is supplied.
    compress_rate : float
        Time-compression factor; output has ``duration * rate / compress_rate``
        samples.
    raw : np.ndarray, optional
        Pre-loaded sample data; when given, ``sf.read`` is skipped.

    Returns
    -------
    (np.ndarray, int, int)
        The resampled mono signal, the raw sample count, and the raw sample rate.
    """
    if raw is None:
        raw, raw_sr = sf.read(raw_audio_path)
    else:
        raw_sr = sample_rate_
    raw_length = raw.shape[0]

    # Silent carrier segment that fixes the target duration and frame rate.
    seg = pydub.AudioSegment.silent(duration=int(max_duration * 1000), frame_rate=sample_rate_)

    # Number of samples after time-compression by `compress_rate`.
    comp_length = int(seg.duration_seconds * seg.frame_rate / compress_rate)

    # NOTE(review): even when `raw` is supplied, the audio content is re-read
    # from `raw_audio_path` here — confirm that is intended.
    seg1 = pydub.AudioSegment.from_wav(raw_audio_path)[:int(max_duration * 1000)]
    # BUG FIX: pydub AudioSegments are immutable — set_frame_rate() and
    # set_sample_width() return NEW segments.  The original code discarded
    # those return values, so the conversions silently never happened.
    seg1 = seg1.set_frame_rate(seg.frame_rate)
    seg1 = seg1.set_sample_width(seg.sample_width)

    arr = segment_to_numpy(seg)
    arr1 = segment_to_numpy(seg1)
    print(max_duration, sample_rate_, arr.shape[0], arr1.shape[0])
    if arr.shape[0] > arr1.shape[0]:
        # NOTE(review): slicing the *shorter* array to the longer length is a
        # no-op; presumably this was meant to pad arr1 (or to trim it when it
        # is the longer one) — confirm the intent.
        arr1 = arr1[:arr.shape[0], ...]
        seg1 = numpy_to_segment(arr1, sample_rate_)
    # Mix the loaded audio onto the silent carrier, keeping the longer duration.
    if seg.duration_seconds > seg1.duration_seconds:
        seg = seg.overlay(seg1)
    else:
        seg = seg1.overlay(seg)

    # NOTE(review): seg1 is overlaid a second time here, doubling its
    # contribution to the mix — confirm this is intended.
    seg = seg.overlay(seg1)
    # Same immutability fix as above: keep the converted segments.
    seg = seg.set_frame_rate(seg1.frame_rate)
    seg = seg.set_sample_width(seg1.sample_width)

    arr = segment_to_numpy(seg)

    # NOTE(review): librosa.to_mono expects channels-first input of shape
    # (n_channels, n); segment_to_numpy returns (n_samples, n_channels) —
    # verify whether `arr` needs a transpose here.
    return resample(librosa.to_mono(arr), comp_length), raw_length, raw_sr


class FolderDataset(Dataset):
    """Dataset over the ``.wav`` files found directly inside ``path``.

    Files are naturally sorted, then sliced to the ``[ratio_min, ratio_max)``
    fraction of the list (used to carve out train/val/test splits).  Each item
    is a 1-D LongTensor: ``overlap_len`` samples of quantized silence followed
    by the linearly quantized audio.
    """

    def __init__(self, path, overlap_len, q_levels, ratio_min=0, ratio_max=1):
        super().__init__()
        self.overlap_len = overlap_len
        self.q_levels = q_levels
        wav_paths = natsorted(
            join(path, name) for name in listdir(path) if name.endswith('.wav')
        )
        lo = int(ratio_min * len(wav_paths))
        hi = int(ratio_max * len(wav_paths))
        self.file_names = wav_paths[lo:hi]

    def __getitem__(self, index):
        # NOTE(review): max_duration/sample rate are hard-coded here rather
        # than taken from the params/config dicts — confirm intended.
        seq, raw_length, raw_sr = load_audio_as_array(
            self.file_names[index], max_duration=2, sample_rate_=44100, compress_rate=1
        )
        silence = torch.LongTensor(self.overlap_len).fill_(utils.q_zero(self.q_levels))
        quantized = utils.linear_quantize(torch.from_numpy(seq), self.q_levels)
        return torch.cat([silence, quantized])

    def __len__(self):
        return len(self.file_names)


class DataLoader(DataLoaderBase):
    """DataLoader that slices each collated batch into overlapping chunks.

    A batch of shape ``(batch, n_samples)`` is cut into windows of
    ``seq_len`` samples, each preceded by ``overlap_len`` samples of context.
    Iteration yields ``(input_sequences, reset, target_sequences)`` where
    ``reset`` is True only for the first chunk of each batch.
    """

    def __init__(self, dataset, batch_size, seq_len, overlap_len,
                 *args, **kwargs):
        super().__init__(dataset, batch_size, *args, **kwargs)
        self.seq_len = seq_len
        self.overlap_len = overlap_len

    def __iter__(self):
        for batch in super().__iter__():
            _, n_samples = batch.size()

            first_chunk = True
            for start in range(self.overlap_len, n_samples, self.seq_len):
                # Window = overlap_len samples of context + seq_len samples.
                window = batch[:, start - self.overlap_len : start + self.seq_len]
                inputs = window[:, :-1]
                targets = window[:, self.overlap_len:].contiguous()

                yield (inputs, first_chunk, targets)

                first_chunk = False

    def __len__(self):
        # Chunk count per batch is data-dependent, so a total length is not defined.
        raise NotImplementedError()


def gradient_clipping(optimizer, min=-1, max=1):
    """Wrap ``optimizer`` so that closure-based ``step`` calls clip every
    parameter gradient element-wise into ``[min, max]`` (via hardtanh)
    right after the closure's backward pass.

    All other attributes (``zero_grad``, ``param_groups``, ...) are delegated
    to the wrapped optimizer unchanged.

    Parameters
    ----------
    optimizer : torch.optim.Optimizer
        The optimizer to wrap; its ``step`` must be called with a closure.
    min, max : float
        Clipping bounds.  (Names kept for interface compatibility even
        though they shadow the builtins.)
    """
    clip_min, clip_max = min, max  # avoid shadowing builtins in the nested scope

    class OptimizerWrapper(object):

        def step(self, closure):
            def closure_wrapper():
                loss = closure()
                for group in optimizer.param_groups:
                    for p in group['params']:
                        # BUG FIX: parameters that received no gradient this
                        # step (unused in the graph, or grads set to None)
                        # have p.grad == None; hardtanh(None) would crash.
                        if p.grad is not None:
                            hardtanh(p.grad, clip_min, clip_max, inplace=True)
                return loss

            return optimizer.step(closure_wrapper)

        def __getattr__(self, attr):
            return getattr(optimizer, attr)

    return OptimizerWrapper()


def main(dataset=None, **params):
    """Overfit a SampleRNN model on a folder of wav files, periodically
    generating audio that is played and written to disk.

    Parameters
    ----------
    dataset : torch.utils.data.Dataset, optional
        Training dataset.  When None (the default) a FolderDataset over the
        hard-coded folder below is built.  BUG FIX: the original constructed
        the dataset eagerly inside the default-argument expression, which ran
        filesystem I/O at module import time and crashed on any machine
        without that path; it is now built lazily on first use.
    **params
        Hyper-parameters; see ``default_params``.
    """
    if dataset is None:
        dataset = FolderDataset(r"C:\dev_spa\DMuse\新建文件夹", 0, default_params['q_levels'])

    steps = 2000

    cwd = 'C:/dev_spa/DMuse/202202c1'

    config = {
        'dataset_config': {
            'train_sample_dir': cwd,
            'test_sample_dir': cwd,
            'train_label_dir': cwd,
            'test_label_dir': cwd,
            'max_duration': .5,
            'sample_rate': 44100,
            'compress_rate': 10.,
        },
        'batch_size': 2,
        'shuffle': True,
    }

    params = dict(
        dataset=dataset,
        **params
    )

    results_path = r"C:\dev_spa\DMuse\samplernn-pytorch-master\datasets"  # NOTE(review): assigned but never used

    model = SampleRNN(
        frame_sizes=params['frame_sizes'],
        n_rnn=params['n_rnn'],
        dim=params['dim'],
        learn_h0=params['learn_h0'],
        q_levels=params['q_levels'],
        weight_norm=params['weight_norm']
    )
    predictor = Predictor(model)
    if params['cuda']:
        model = model.cuda()
        predictor = predictor.cuda()

    # NOTE(review): cuda=True is hard-coded here although the model move above
    # is gated on params['cuda'] — confirm.
    generator = Generator(model, cuda=True)

    optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))

    # BUG FIX (behavior-preserving): the original passed shuffle=(not eval)
    # and drop_last=(not eval), where `eval` is the *builtin function* (always
    # truthy), so both flags were silently always False.  Spelled out
    # explicitly; a training loader normally wants shuffle=True and
    # drop_last=True — confirm before changing.
    data_loader = DataLoader(
        dataset,
        batch_size=params['batch_size'],
        seq_len=params['seq_len'],
        overlap_len=model.lookback,
        shuffle=False,
        drop_last=False
    )
    test_split = 1 - params['test_frac']         # NOTE(review): computed but never used
    val_split = test_split - params['val_frac']  # NOTE(review): computed but never used

    # Warm-up forward pass; also gives `output` a shape for generation below.
    input_seqs, reset, target_seqs = next(iter(data_loader))
    output = predictor(input_seqs.cuda(), reset)

    for step in range(1, steps + 1):
        # NOTE(review): a fresh iterator is built every step, so with
        # shuffle=False this always trains on the first chunk of the first
        # batch — confirm this single-batch overfitting is intended.
        input_seqs, reset, target_seqs = next(iter(data_loader))

        output = target_seqs  # placeholder; overwritten inside the closure

        def closure():
            # Forward/backward pass; invoked by the clipping optimizer wrapper.
            nonlocal output
            output = predictor(input_seqs.cuda(), reset)
            print(output.size())
            loss = sequence_nll_loss_bits(output, target_seqs.cuda())
            print('step:', step, 'loss:', loss.item())
            optimizer.zero_grad()
            loss.backward()
            return loss

        optimizer.step(closure)

        if step % 1 == 0:  # runs every step; raise the modulus to sample less often
            output = generator(output.size(0), output.size(1))
            print(output.size(), int(config['dataset_config']['max_duration'] * config['dataset_config']['sample_rate']))
            audio = resample(output.cpu().detach()[0, ...], int(config['dataset_config']['max_duration'] * config['dataset_config']['sample_rate']))
            sd.play(audio, samplerate=config['dataset_config']['sample_rate'], blocking=False)
            sf.write(cwd + '/img/{}.wav'.format(step), audio, samplerate=config['dataset_config']['sample_rate'])
            torch.save(model.state_dict(), cwd + '/sound_vae5.pth')


if __name__ == '__main__':
    # Launch training with the default hyper-parameters defined above.
    main(**default_params)
