import os
import random
from datetime import datetime

import dawdreamer as daw
import numpy as np
# from keras import Input
# from keras.layers import Dense, Conv2D, Flatten, Dropout, LSTM, Reshape, SeparableConv2D, Conv1D, Conv1DTranspose
# from keras.models import Model
# from keras import backend as K
# from keras.optimizer_v2 import adam
from keras.utils.np_utils import to_categorical

import common  # project-local helper (get_files, plugin_paths) — was used below but never imported
from dqn import DQN


def initialize(dirs, sample_info, instr_name='Sylenth1'):
    """Set up the DawDreamer render engine and collect preset/MIDI file paths.

    Args:
        dirs: ``(preset_dir, midi_dir, output_dir)`` path triple.
        sample_info: ``(sample_rate, buffer_size, cutoff_time)``;
            ``cutoff_time`` is unused here.
        instr_name: key into ``common.plugin_paths`` selecting the VST plugin.

    Returns:
        ``((engine, instr), [preset_paths, midi_paths])``
    """
    preset_dir, midi_dir, output_dir = dirs
    sample_rate, buffer_size, _ = sample_info

    # makedirs + exist_ok is robust where os.mkdir would raise: it tolerates
    # a pre-existing directory and creates missing intermediate directories.
    os.makedirs(output_dir, exist_ok=True)

    print('正在获取路径下所有预设和MIDI文件……')  # "Collecting all presets and MIDI files under the paths..."
    preset_paths = common.get_files(preset_dir, 'fxp')
    midi_paths = common.get_files(midi_dir, 'mid')

    engine = daw.RenderEngine(sample_rate, buffer_size)

    # Single-instrument graph: the plugin processor has no upstream inputs.
    instr = engine.make_plugin_processor('instr_1', common.plugin_paths[instr_name])
    engine.load_graph([(instr, [])])

    return (engine, instr), [preset_paths, midi_paths]


def render_from_params(params, midi, daw_info, sample_info, n_param_classes=8):
    """Render one audio clip per one-hot parameter set in ``params``.

    Args:
        params: array of shape ``(batch, n_params, n_classes)`` — one-hot (or
            logit) encoded plugin parameters; ``argmax`` picks the class index.
            (assumed shape — TODO confirm against DQN action encoding)
        midi: path of the MIDI file loaded before each render.
        daw_info: ``(engine, instr)`` as returned by ``initialize``.
        sample_info: ``(sample_rate, buffer_size, cutoff_time)``; only the
            cutoff time is used here.
        n_param_classes: number of discretization classes per parameter.

    Returns:
        ``np.ndarray`` of shape ``(batch,) + audio.shape`` stacking the clips.
    """
    engine, instr = daw_info
    _, _, cutoff_time = sample_info  # sample_rate / buffer_size are unused here
    clips = []
    for i in range(params.shape[0]):
        for index, one_hot in enumerate(params[i, ...]):
            # Squash the chosen class index into (0, 1) with a sigmoid —
            # plugin parameters are normalized floats.
            class_idx = np.argmax(one_hot).astype(np.float32)
            value = 1. / (1. + np.exp(-class_idx / float(n_param_classes)))
            instr.set_parameter(index, value)
        instr.load_midi(midi)
        # Render slightly past the cutoff so the note tail is not clipped.
        engine.render(cutoff_time + 1.)
        audio = engine.get_audio()
        clips.append(audio[np.newaxis, ...])
    return np.vstack(clips)


def get_data(paths, daw_info, sample_info, midi='', num=1, n_param_classes=8):
    """Render ``num`` random presets and return (audio, params, midis) data.

    Args:
        paths: ``[preset_paths, midi_paths]`` from ``initialize``.
        daw_info: ``(engine, instr)``.
        sample_info: ``(sample_rate, buffer_size, cutoff_time)``.
        midi: optional fixed MIDI path; if empty, one is drawn at random on
            the first iteration and then reused for the remaining ones.
        num: number of presets to sample.
        n_param_classes: quantization classes per plugin parameter.

    Returns:
        ``(audio_batch, param_batch, midis)`` where the first two are stacked
        ``np.ndarray``s with a leading batch axis.
    """
    preset_paths, midi_paths = paths
    engine, instr = daw_info
    _, _, cutoff_time = sample_info  # only the cutoff time is needed here
    audio_data, param_data, midis = [], [], []

    print('正在读取数据……')  # "Loading data..."
    for _ in range(num):
        instr.load_preset(random.choice(preset_paths))

        # Quantize every normalized plugin parameter into one-hot classes.
        params = [instr.get_parameter(i)
                  for i in range(instr.get_plugin_parameter_size())]
        # np.int was removed in NumPy 1.24; builtin int is the equivalent.
        # NOTE(review): a parameter of exactly 0 maps to class index -1 —
        # confirm to_categorical handles that as intended.
        params = to_categorical((np.array(params) * n_param_classes - 1).astype(int),
                                n_param_classes + 1)
        param_data.append(params)

        if not midi:
            midi = random.choice(midi_paths)
        instr.load_midi(midi)
        midis.append(midi)

        engine.render(cutoff_time + 0.5)
        audio = engine.get_audio()
        audio_data.append(audio)

    return np.concatenate(audio_data).reshape((-1,) + audio_data[0].shape), \
        np.concatenate(param_data).reshape((-1,) + param_data[0].shape), midis


def train(paths, daw_info, sample_info, save_root_dir, n_iter=10000, batch_size=8):
    """Train a DQN to reproduce target audio by choosing synth parameters.

    Args:
        paths: ``[preset_paths, midi_paths]`` from ``initialize``.
        daw_info: ``(engine, instr)``.
        sample_info: ``(sample_rate, buffer_size, cutoff_time)``.
        save_root_dir: root directory under which a timestamped run directory
            (with ``img``, ``wav_gen`` and ``test_logs`` subdirectories) is made.
        n_iter: currently unused; kept for interface compatibility.
        batch_size: forwarded to ``DQN.train``.
    """
    # Timestamped run directory, e.g. "2023-01-01_12-34-56"; strftime yields
    # the same text the old str()/split()/replace() chain produced.
    run_name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    save_dir = os.path.join(save_root_dir, run_name)
    os.mkdir(save_dir)
    os.mkdir(os.path.join(save_dir, 'img'))
    os.mkdir(os.path.join(save_dir, 'wav_gen'))
    os.mkdir(os.path.join(save_dir, 'test_logs'))

    # One fixed MIDI clip per run; the target (audio, params) pair comes from
    # a random preset rendered with that clip.
    midi = random.choice(paths[1])
    audio, params, _ = get_data(paths, daw_info, sample_info, midi=midi, num=1)

    def step(action):
        # Apply the agent's action as a new parameter set and re-render.
        nonlocal audio, params
        params = action.reshape((1,) + action.shape)
        audio = render_from_params(params, midi, daw_info, sample_info)

    def reset():
        # Draw a fresh random preset as the next target.
        nonlocal audio, params
        audio, params, _ = get_data(paths, daw_info, sample_info, midi=midi, num=1)

    dqn = DQN(input_shape=(512,), n_output=params.shape[-2])
    dqn.train(step_func=step, reset_func=reset, batch_size=batch_size)


if __name__ == '__main__':
    # Hard-coded local resource locations: preset dir, MIDI dir, output dir.
    dirs = (
        r'G:\编曲资源',
        r'G:\编曲资源',
        r'C:\dev_spa\artear_pg_2',
    )
    # (sample_rate_hz, buffer_size, cutoff_time_seconds)
    sample_info = (44100, 256, 2.)

    daw_info, paths = initialize(dirs, sample_info, instr_name='Sylenth1')
    # Run artifacts are written under the output directory (dirs[2]).
    train(paths, daw_info, sample_info, dirs[2], n_iter=10000, batch_size=16)
