import os
import random
import time

import numpy as np
import scipy.signal
import sounddevice as sd
from keras import Input
from keras import Model
from keras.layers import Conv1D, MaxPooling1D, Dense, Flatten
from utils.simpledaw import SimpleDAW
from utils.util import get_files


def resample(raw, source_sample_rate, target_sample_rate):
    """Resample *raw* audio from *source_sample_rate* to *target_sample_rate*.

    Uses scipy's FFT-based resampler; the output length is scaled by the
    ratio of the two rates (truncated to an int).
    """
    target_length = int(raw.shape[0] * target_sample_rate / source_sample_rate)
    return scipy.signal.resample(raw, target_length)


def preprocess(batch_size, duration, source_sample_rate, target_sample_rate):
    """Render a batch of (audio, params, midi, raw_audio) training examples.

    For each of *batch_size* examples: pick a random preset and MIDI file,
    render *duration* seconds of audio through the VST host, and keep both
    the raw render and a copy resampled to *target_sample_rate*.

    Args:
        batch_size: number of examples to generate.
        duration: render length passed to ``daw.render``.
        source_sample_rate: sample rate of the raw render (should match the
            DAW's 44100 Hz setting).
        target_sample_rate: sample rate of the downsampled model input.

    Returns:
        Four parallel lists of length *batch_size*: resampled audio arrays,
        parameter arrays, MIDI file paths, and raw audio arrays.  Each array
        entry carries a leading batch axis of 1 so entries can later be
        concatenated into a batch.
    """
    # NOTE(review): assumes SimpleDAW.get_params(preset) both loads the
    # preset into the plugin and returns its parameter vector — confirm.
    daw = SimpleDAW(plugin='C:/VstPlugins/64bit/Sylenth1.dll', sample_rate=44100, bpm=120)
    presets = get_files(r"B:\muse_repo\Presets\Legacy", 'fxp')
    midis = get_files(r"B:\muse_repo\MIDI", 'mid')

    audio_data, param_data, midi_data, raw_audio_data = [], [], [], []

    for i in range(batch_size):
        params = np.array(daw.get_params(random.choice(presets)))
        param_data.append(params.reshape((1,) + params.shape))
        midi = random.choice(midis)
        daw.load_midi(midi)
        midi_data.append(midi)
        raw = daw.render(duration)
        raw_audio_data.append(raw.reshape((1,) + raw.shape))
        audio = resample(raw, source_sample_rate, target_sample_rate)
        audio_data.append(audio.reshape((1,) + audio.shape))
        # Fix: report the shapes of the example just rendered — the original
        # debug print showed element 0's shapes on every iteration.
        print(raw_audio_data[-1].shape, audio_data[-1].shape)

    return audio_data, param_data, midi_data, raw_audio_data


def build_model(input_shape, n_params, hidden_size=16):
    """Build a 1-D CNN that maps an audio clip to synth parameter estimates.

    Args:
        input_shape: shape of one audio example without the batch axis.
        n_params: number of plugin parameters to predict; the output layer
            uses a sigmoid so every prediction lies in [0, 1].
        hidden_size: currently unused; kept for interface compatibility.

    Returns:
        A compiled keras ``Model`` (adam optimizer, MSE loss, accuracy metric).
    """
    in_ = Input(shape=input_shape)
    x = Conv1D(512, 3, padding='same', activation='relu')(in_)
    x = MaxPooling1D(8)(x)
    x = Conv1D(128, 3, padding='same', activation='relu')(x)
    x = MaxPooling1D(8)(x)
    x = Flatten()(x)
    # NOTE(review): the 'LeakyReLU' activation string resolves through the
    # layer registry in tf.keras 2.x; Keras 3 expects 'leaky_relu' — confirm
    # against the installed version before changing.
    x = Dense(n_params * 2, activation='LeakyReLU')(x)
    x = Dense(n_params, activation='sigmoid')(x)

    model_ = Model(in_, x)
    model_.compile(optimizer='adam', loss='mse', metrics='acc')
    return model_


def parallel_sample(*data, batch_size=1):
    """Draw the same random rows (with replacement) from parallel sequences.

    Picks *batch_size* random indexes into ``data[0]`` and returns one list
    per input sequence containing the elements at those shared indexes, so
    the sampled rows stay aligned across all inputs.
    """
    last = len(data[0]) - 1
    chosen = [random.randint(0, last) for _ in range(batch_size)]
    return [[seq[idx] for idx in chosen] for seq in data]


def train():
    """Train the CNN to predict synth parameters from rendered audio.

    Renders a fixed dataset once via ``preprocess``, then trains on random
    batches drawn from it.  Every 50 steps it plays the ground-truth render
    and a re-render from the predicted parameters back-to-back, and
    checkpoints the weights.
    """
    steps = 1000
    batch_size = 32
    duration = 2.  # seconds per rendered clip

    weights_filename = r"C:\dev_spa\dmuse\artear_pg_3\weights.h5"

    # Model input is downsampled to 2205 Hz; raw renders stay at 44100 Hz.
    audio_data, param_data, midi_data, raw_audio_data = preprocess(batch_size, duration, 44100, 2205)
    print(audio_data[0].shape[1:], param_data[0].shape[-1])
    model = build_model(audio_data[0].shape[1:], param_data[0].shape[-1])
    model.summary()

    daw = SimpleDAW(plugin='C:/VstPlugins/64bit/Sylenth1.dll', sample_rate=44100, bpm=120)

    # Resume from a previous checkpoint if one exists.
    if os.path.exists(weights_filename):
        model.load_weights(weights_filename)

    for step in range(steps + 1):
        # Sample a training batch (with replacement) from the fixed dataset.
        audios, params = parallel_sample(audio_data, param_data, batch_size=batch_size)
        audios, params = np.concatenate(audios), np.concatenate(params)
        loss, metrics = model.train_on_batch(audios, params)

        if step % 5 == 0:
            print('loss:', loss, 'acc:', metrics)

        if step % 50 == 0:
            # Qualitative check: play the ground-truth render, then re-render
            # with the predicted parameters and play that for comparison.
            audio, params_, midi, raw_audio = parallel_sample(audio_data, param_data, midi_data, raw_audio_data, batch_size=1)
            pred = model.predict(audio)

            # "Now playing the original audio:" (first 10 true params shown)
            print('正在播放原音频：', params_[0][0, :10])
            raw = raw_audio[0]
            sd.play(raw[0, ...], samplerate=44100, blocking=True)
            print('播放结束')  # "Playback finished"
            time.sleep(1.5)
            # "Now playing audio rendered from the predicted preset:"
            print('正在播放预测预置生成音频：', pred[0, :10])
            daw.load_midi(midi[0])
            daw.set_params(pred[0, ...].tolist())
            aud = daw.render(duration)
            print(raw.shape, aud.shape)
            sd.play(aud, samplerate=44100, blocking=True)
            print('播放结束')  # "Playback finished"
            time.sleep(1.5)

            model.save_weights(weights_filename)


# Guard the entry point so importing this module does not start training.
if __name__ == '__main__':
    train()