import random

import librosa
import numpy as np
import paddle
import sounddevice as sd
import soundfile as sf
import gym
from PIL.Image import Image
from gym import spaces
from scipy.signal import resample
from ssqueezepy import ssq_stft, ssq_cwt

from data_gen.utils.util import get_files
from Artear.utils import simpledaw


def load_audio_as_spec(raw_audio_path, shape, compress_rate):
    """Load a stereo audio file and convert it into a 3-channel spectrogram image.

    Channels 0 and 1 are synchrosqueezed STFT magnitudes of the left and right
    audio channels; channel 2 is the synchrosqueezed CWT magnitude of the mono
    mix. Each spectrogram is resized to ``shape`` and stacked along the last axis.

    Args:
        raw_audio_path: path to a stereo audio file readable by soundfile.
        shape: (width, height) target size for each resized spectrogram.
        compress_rate: downsampling factor applied before the transforms
            (larger -> shorter signal, coarser spectrogram).

    Returns:
        np.ndarray of shape (shape[1], shape[0], 3) with float magnitudes.
    """
    # The module-level `from PIL.Image import Image` binds the Image *class*,
    # which has no `fromarray` attribute. Import the PIL.Image *module* locally
    # so Image.fromarray resolves correctly.
    from PIL import Image

    # BUG FIX: sf.read returns (data, samplerate), not a bare array; the old
    # code indexed the tuple as if it were the waveform.
    raw, _sr = sf.read(raw_audio_path)

    # soundfile returns (samples, channels); resample each channel to the
    # compressed length.
    n_samples = int(raw.shape[0] / compress_rate)
    dat_0 = resample(raw[:, 0], n_samples)
    dat_1 = resample(raw[:, 1], n_samples)
    # BUG FIX: librosa.to_mono expects (channels, samples) -- transpose the
    # soundfile layout before mixing down.
    dat_2 = resample(librosa.to_mono(raw.T), n_samples)

    spec_0, *_ = ssq_stft(dat_0)
    spec_1, *_ = ssq_stft(dat_1)
    spec_2, *_ = ssq_cwt(dat_2)

    def _to_resized_magnitude(spec):
        # Magnitude -> float32 so PIL builds an 'F'-mode image, then resize
        # to the common target shape.
        img = Image.fromarray(np.abs(spec).astype(np.float32))
        return np.array(img.resize(shape))

    return np.stack([_to_resized_magnitude(spec_0),
                     _to_resized_magnitude(spec_1),
                     _to_resized_magnitude(spec_2)], axis=-1)


class Audio2AudioDataset(paddle.io.Dataset):
    """Paired audio dataset: each item is (sample, label) spectrogram arrays.

    Expects directories of wav files for samples and labels; files are paired
    positionally after directory listing.
    """

    def __init__(self, mode='train', **config):
        """
        Args:
            mode: 'train' selects train_*_dir keys, anything else test_*_dir.
            **config: must contain the four *_sample_dir / *_label_dir paths
                and 'compress_rate'.
        """
        super(Audio2AudioDataset, self).__init__()
        self.task_type = 'audio2audio'
        # BUG FIX: __getitem__ reads self.config['compress_rate'], but the
        # original never stored the config on the instance (AttributeError).
        self.config = config

        prefix = 'train' if mode == 'train' else 'test'
        samples = get_files(config[prefix + '_sample_dir'], 'wav')
        labels = get_files(config[prefix + '_label_dir'], 'wav')
        # NOTE(review): zip silently truncates if the two directories differ
        # in file count -- assumes sample/label files are 1:1 aligned.
        self.data = list(zip(samples, labels))

    def __getitem__(self, index):
        """Return (sample_spec, label_spec), each a (224, 224, 3) array."""
        sample_path, label_path = self.data[index]
        sample = load_audio_as_spec(sample_path, shape=(224, 224),
                                    compress_rate=self.config['compress_rate'])
        label = load_audio_as_spec(label_path, shape=(224, 224),
                                   compress_rate=self.config['compress_rate'])
        return sample, label

    def __len__(self):
        return len(self.data)


def get_img_output_length(width, height):
    """Return the number of spatial positions left after five 2x2/stride-2
    downsampling stages (VGG16's pooling pyramid): reduced_w * reduced_h."""
    def _shrink(length):
        # Each stage applies (L + 2*pad - kernel) // stride + 1 with
        # kernel=2, pad=0, stride=2.
        for _ in range(5):
            length = (length - 2) // 2 + 1
        return length

    return _shrink(width) * _shrink(height)


class AudioSimilarityNetwork(paddle.nn.Layer):
    """Siamese VGG16 scoring the similarity of two spectrogram images.

    Both inputs share the VGG16 convolutional feature extractor; the absolute
    difference of the flattened feature maps is projected down to one score.
    """

    def __init__(self, input_shape, pretrained=False):
        """
        Args:
            input_shape: (height, width) of the inputs. A 1-tuple (n,) is
                accepted as shorthand for a square (n, n) input -- the
                training script below passes (224,).
            pretrained: forwarded to paddle.vision.models.vgg16.
        """
        super(AudioSimilarityNetwork, self).__init__()
        self.vgg = paddle.vision.models.vgg16(pretrained)

        # Only the convolutional feature extractor is used.
        del self.vgg.avgpool
        del self.vgg.classifier

        # BUG FIX: input_shape[1] raised IndexError for the 1-tuple the
        # caller passes; treat (n,) as a square input.
        if len(input_shape) == 1:
            height = width = input_shape[0]
        else:
            height, width = input_shape[0], input_shape[1]

        # 512 = channel count of VGG16's last conv block.
        flat_shape = 512 * get_img_output_length(width, height)
        self.fully_connect1 = paddle.nn.Linear(flat_shape, 512)
        self.fully_connect2 = paddle.nn.Linear(512, 1)

    def forward(self, x):
        """x is a pair (x1, x2) of image batches; returns a (b, 1) score."""
        x1, x2 = x
        x1 = self.vgg.features(x1)
        x2 = self.vgg.features(x2)
        # BUG FIX: paddle tensors expose .shape (torch-style .size() is not
        # callable) and are reshaped via paddle.reshape, not .view.
        b = x1.shape[0]
        x1 = paddle.reshape(x1, [b, -1])
        x2 = paddle.reshape(x2, [b, -1])
        x = paddle.abs(x1 - x2)
        x = self.fully_connect1(x)
        x = self.fully_connect2(x)
        return x


class Simulator(gym.Env):
    """Gym environment whose action is a DAW parameter vector.

    Each step() also runs one training batch of the similarity network and
    uses its prediction as the reward. Relies on the module-level names
    `loss_fn`, `optimizer`, `validate_every` and `config` defined by the
    training script below.
    """

    def __init__(self, simple_daw, train_loader, test_loader, similarity_network, validate_func):
        n_params = simple_daw.count_params()
        self.n_params = n_params
        # Action and observation are both the normalized DAW parameter vector.
        self.action_space = spaces.Box(high=1., low=0., shape=(n_params,))
        self.observation_space = spaces.Box(high=1., low=0., shape=(n_params,))
        self.step_count = 0

        self.daw = simple_daw
        self.midi_paths = get_files(r'B:\muse_repo\MIDI', 'mid')
        # BUG FIX: the original line was the unfinished "self.train_loader ="
        # (a SyntaxError) and test_loader was never stored at all.
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.similarity_network = similarity_network
        self.validate_func = validate_func

    def step(self, action):
        """Apply one action; train the similarity net on one batch.

        Returns (state, reward, done, info) per the gym API; every episode
        is a single step (done is always True).
        """
        state = np.array(list(action))

        # BUG FIX: x_data / y_data were undefined names -- draw one batch
        # from the stored training loader instead.
        # NOTE(review): paddle DataLoader is iterated by calling it; confirm
        # this matches the installed paddle version.
        x_data, y_data = next(self.train_loader())

        predicts = self.similarity_network(x_data)
        acc = paddle.metric.accuracy(predicts, y_data)
        loss = loss_fn(predicts, y_data)
        loss.backward()
        if self.step_count % validate_every == 0:
            print("相似度判别器：batch_id: {}, loss: {}, acc: {}".format(self.step_count, loss.numpy(), acc.numpy()))
            # Use the instance attributes rather than the module-level names.
            self.validate_func(self.similarity_network(next(self.test_loader())[1][0]), config['validation_config'])
        optimizer.step()
        optimizer.clear_grad()

        # Reward is the raw similarity prediction for this batch.
        reward = predicts

        self.step_count += 1
        print('[第{}轮训练]'.format(self.step_count), 'action:', action[:10], 'state:', state[:10])

        done = True
        info = {}
        return state, reward, done, info

    def reset(self):
        # BUG FIX: the original returned np.zeros(244), which does not match
        # the declared observation_space shape (n_params).
        return np.zeros(self.n_params)

    def render(self, mode='human'):
        # No visual rendering for this environment.
        pass

    def seed(self, seed=None):
        # Determinism is not controlled here; intentionally a no-op.
        pass


mode = 'train'
if __name__ == '__main__':
    if mode == 'train':
        # BUG FIX: DDPG was used below but never imported; it comes from
        # stable-baselines3. Imported locally so the dependency is only
        # required when actually training.
        from stable_baselines3 import DDPG

        # Configuration
        config = {
            'dataset_config': {
                'train_sample_dir': '',
                'test_sample_dir': '',
                'train_label_dir': '',
                'test_label_dir': '',
                'plugin_path': '',
                'preset_file_format': 'fxp',
                'compress_rate': 100.,
            },

            'validation_config': {
                'source_sample_rate': 44100.,
                'sample_rate': 44100,
                'bpm': 120,
            },
            'shuffle': True,
            # BUG FIX: 'batch_size' was read by the DataLoader setup below
            # but never defined, raising KeyError.
            'batch_size': 4,
        }

        # Dataset, network, loss function
        dataset_class = Audio2AudioDataset
        train_dataset = dataset_class(mode='train', **config['dataset_config'])
        test_dataset = dataset_class(mode='test', **config['dataset_config'])
        similarity_network = AudioSimilarityNetwork((224,))
        loss_fn = paddle.nn.MSELoss()

        # Validation: render the predicted parameters in the DAW and play
        # the result back for subjective inspection.
        simple_daw = simpledaw.SimpleDAW(plugin=config['dataset_config']['plugin_path'],
                                         sample_rate=config['validation_config']['sample_rate'],
                                         bpm=config['validation_config']['bpm'])

        def validate_func(prediction, validation_config):
            # Push the first predicted parameter vector into the DAW,
            # render 2 seconds of audio and play it blocking.
            simple_daw.set_params(list(prediction[0, ...]))
            audio = simple_daw.render(2.)
            print('正在播放预测参数')
            sd.play(audio, validation_config['source_sample_rate'], blocking=True)
            print('播放结束')

        # Data loaders
        train_loader = paddle.io.DataLoader(train_dataset, batch_size=config['batch_size'],
                                            shuffle=config.get('shuffle', True))
        test_loader = paddle.io.DataLoader(test_dataset, batch_size=config['batch_size'],
                                           shuffle=config.get('shuffle', True))

        # Training options
        epochs = 5
        validate_every = 10
        optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=similarity_network.parameters())

        env = Simulator(simple_daw, train_loader, test_loader, similarity_network, validate_func)

        # BUG FIX: the DDPG model was re-created inside the epoch loop,
        # discarding all learned policy weights each epoch; build it once.
        model = DDPG(policy="MlpPolicy", env=env)

        # Training loop
        for epoch in range(epochs):
            for _ in range(100):
                try:
                    model.learn(total_timesteps=1)
                    model.save('pg_rl.zip')
                except Exception as e:
                    # BUG FIX: the bare `continue` silently swallowed every
                    # error; report it before continuing best-effort.
                    print('训练步骤出错:', repr(e))
                    continue
